1 Document Setup & Libraries

#  Output width; 10 is unusually narrow and is why the printed output in this
#  document wraps onto so many short lines -- TODO confirm this is intentional
options(width = 10)
#  Tables, reports & interactive display
library(formattable)
library(DataExplorer)
library(esquisse)
#  Modeling & statistics
library(car)
library(MASS)
library(ggfortify)
library(rpivotTable)
#  Data import & wrangling
library(readxl)
library(data.table)
library(reshape2)
library(DescTools)
library(emmeans)
library(jmv)
#  Plotting themes & helpers
library(ggthemes)
library(wesanderson)
library(ggpubr)
library(kableExtra)
library(apaTables)
library(gridExtra)
library(xlsx)
library(gganimate)
#  Psychometrics & general-purpose tools
library(psych)
library(tidyverse)
library(magrittr)
library(sjstats)
library(jtools)
library(ggstance)
library(caret)
library(rmarkdown)
library(pander)
#  NOTE(review): tidyverse is attached after MASS and data.table, so
#  dplyr::select / dplyr::filter mask the earlier versions -- the bare
#  select()/filter() calls below rely on this load order.

2 Functions

#  changes Session.ID where ID was mis-entered or had to be changed
#  Maps known mis-entered session IDs to their corrected values and
#  lowercases the result. Vectorized; IDs not in the correction map pass
#  through unchanged (lowercased), and NA inputs stay NA. Implemented with a
#  base-R lookup table instead of dplyr::case_when with a hand-rolled
#  negated-%in% default branch.
fix.id <- function(x) {
  #  Known corrections: mis-entered ID -> intended ID (exact, case-sensitive
  #  match, as in the original case_when)
  id.map <- c(
    pb0401 = "pilotb0402",
    pilotb0829 = "pb0830",
    pilota1017 = "nb1018"
  )
  needs.fix <- x %in% names(id.map)
  fixed <- ifelse(needs.fix, id.map[x], x)
  tolower(fixed)
}

#  Reading and formatting individual SuperLab files
#  Reads one tab-delimited SuperLab output file (skipping the 6-line file
#  header), drops the spurious extra 10th column that some files contain
#  (33 columns instead of 32), applies the shared `sl.cols` column names
#  (built elsewhere in this script), and returns a plain data.frame.
read.sl <- function(files) {
  the.data <- read_delim(files,
    "\t",
    escape_double = FALSE,
    col_names = FALSE,
    col_types = cols(X5 = col_double()),
    na = "empty",
    trim_ws = TRUE,
    skip = 6
  )
  #  Some files carry an extra column (X10); drop it so every file ends up
  #  with the same 32 columns. The original also renamed the remaining
  #  columns here, but that was dead code -- sl.cols is assigned
  #  unconditionally right after.
  if (ncol(the.data) == 33) {
    the.data$X10 <- NULL
  }
  colnames(the.data) <- sl.cols
  data.frame(the.data)
}

#  Return valence and word type of word

#  Peek at one of the word-list files to show its shape (one unnamed column)
read.csv("./rawdata/Positive Emotion Label List.csv")
##          x
## 1  aroused
## 2    brave
## 3    cheer
## 4  delight
## 5   desire
## 6  devoted
## 7  ecstasy
## 8   elated
## 9  freedom
## 10     fun
## 11   happy
## 12  honest
## 13 hopeful
## 14     joy
## 15    kind
## 16   loyal
## 17    lust
## 18   pride
## 19 respect
## 20  secure
#  Read a one-column word-list CSV and return the words as a plain vector;
#  avoids repeating the same read.csv/indexing incantation for every list.
read.word.list <- function(path) {
  as.vector(read.csv(path)[, 1])
}
#  Valence lists (emotion-label + emotion-laden combined per valence) and
#  word-type lists (label vs laden, across both valences)
pos.list <- c(read.word.list("./rawdata/Positive Emotion Label List.csv"), read.word.list("./rawdata/Positive Emotion Laden List.csv"))
neg.list <- c(read.word.list("./rawdata/Negative Emotion Label List.csv"), read.word.list("./rawdata/Negative Emotion Laden List.csv"))
neu.list <- c(read.word.list("./rawdata/Neutral.A List.csv"), read.word.list("./rawdata/Neutral.B List.csv"))
e.list <- c(read.word.list("./rawdata/Positive Emotion Label List.csv"), read.word.list("./rawdata/Negative Emotion Label List.csv"))
el.list <- c(read.word.list("./rawdata/Positive Emotion Laden List.csv"), read.word.list("./rawdata/Negative Emotion Laden List.csv"))

#  Categorical valence of each word, looked up against the word lists above.
#  Vectorized; words in none of the lists get "None". If a word appears in
#  several lists the priority is Positive > Negative > Neutral (later
#  assignments below overwrite earlier ones).
get.val <- function(x) {
  valence <- rep("None", length(x))
  valence[x %in% neu.list] <- "Neutral"
  valence[x %in% neg.list] <- "Negative"
  valence[x %in% pos.list] <- "Positive"
  valence
}
#  Word type (Emotion-Label / Emotion-Laden / Neutral) of each word, looked
#  up against the word lists above. Vectorized; unknown words get "None".
#  Priority if a word appears in several lists:
#  Emotion-Label > Emotion-Laden > Neutral (later assignments overwrite).
get.wt <- function(x) {
  wt <- rep("None", length(x))
  wt[x %in% neu.list] <- "Neutral"
  wt[x %in% el.list] <- "Emotion-Laden"
  wt[x %in% e.list] <- "Emotion-Label"
  wt
}

#  Remove trials which had all NRs due to Superlab error
#  Drops the rows of `x` whose Trial.ID is one of the four trials known to
#  have been misrecorded; all other rows (and all columns) pass through.
remove.problem.trials <- function(x) {
  bad.trials <- c("elated...comfort...puppy", "fun...kiss...victory", "joy...fun...cheer", "pride...jewel...tamper")
  keep <- !(x$Trial.ID %in% bad.trials)
  x[keep, , drop = FALSE]
}

#  Recode factors to numeric levels for aggregation
#  Coerces f to a factor (levels in sorted order) and returns the 1-based
#  level codes as a numeric vector.
fac.to.num <- function(f) {
  coded <- as.factor(f)
  as.numeric(coded)
}

3 Import BDI & STAI Data

#  Import inventory data and score----

#  Reverse scored items
#  Column POSITIONS (not item numbers) within `inventories` that are
#  reverse-keyed; column 1 is the id, so these land in the STAI Y1/Y2 ranges
#  (23:42, 43:62) used for the totals below -- TODO confirm against workbook
stai.rev <- c(23, 24, 27, 30, 32, 33, 37, 38, 41, 42, 43, 45, 48, 49, 52, 55, 56, 58, 61)

#  One text id column followed by 61 numeric item responses
inventories <- read_excel("./rawdata/inventories.xlsx", col_types = c("text", rep("numeric", 61)))
#  Reverse-score: 5 - x flips a 1-4 response scale in place
inventories[, stai.rev] <- 5 - inventories[, stai.rev]

#  Rename & fix ID discrepancies
#  Score inventories and categorize based on cutoffs
#  Screening cutoffs: BDI >= 13, STAI >= 43 -- TODO confirm cutoff source
bdi.cut <- 13
stai.cut <- 43

#  Score the inventories and derive mood-state groupings.
#  NOTE: inside each mutate(), `.$col` refers to the data frame as it entered
#  THAT mutate() step -- which is why totals, the per-inventory Scoring
#  flags, and the composite mood-state variables are built in successive
#  mutate() calls rather than one.
inventories <- inventories %>%
  rename(., Session.ID = "id") %>%
  mutate(.,
    Session.ID = fix.id(.$Session.ID),
    #  Column positions: 2:22 = 21 BDI items, 23:42 = STAI Y1 (state),
    #  43:62 = STAI Y2 (trait) -- TODO confirm against the workbook layout
    bdi.tot = rowSums(.[, 2:22]),
    stai.y1.tot = rowSums(.[, 23:42]),
    stai.y2.tot = rowSums(.[, 43:62])
  ) %>%
  #  Dichotomize each inventory at its cutoff
  mutate(.,
    "bdi" = ifelse(.$bdi.tot >= bdi.cut, "Scoring", "Non-Scoring"),
    "stai.y1" = ifelse(.$stai.y1.tot >= stai.cut, "Scoring", "Non-Scoring"),
    "stai.y2" = ifelse(.$stai.y2.tot >= stai.cut, "Scoring", "Non-Scoring")
  ) %>%
  #  Composite mood-state groupings at 2, 4, and 8 levels, combining the
  #  three dichotomized inventories in different ways
  mutate(.,
    mood.state.2 = ifelse(.$bdi == "Scoring" | .$stai.y1 == "Scoring" | .$stai.y2 == "Scoring", "Scoring", "Non-Scoring"),
    mood.state.2.y1 = ifelse(.$bdi == "Scoring" | .$stai.y1 == "Scoring", "Scoring", "Non-Scoring"),
    mood.state.2.y2 = ifelse(.$bdi == "Scoring" | .$stai.y2 == "Scoring", "Scoring", "Non-Scoring"),
    mood.state.4.y1 = case_when(
      .$bdi == "Scoring" & .$stai.y1 == "Non-Scoring" ~ "BDI Only",
      .$bdi == "Non-Scoring" & .$stai.y1 == "Scoring" ~ "STAI Only",
      .$bdi == "Scoring" & .$stai.y1 == "Scoring" ~ "BDI & STAI",
      .$bdi == "Non-Scoring" & .$stai.y1 == "Non-Scoring" ~ "Non-Scoring"
    ),
    mood.state.4.y2 = case_when(
      .$bdi == "Scoring" & .$stai.y2 == "Non-Scoring" ~ "BDI Only",
      .$bdi == "Non-Scoring" & .$stai.y2 == "Scoring" ~ "STAI Only",
      .$bdi == "Scoring" & .$stai.y2 == "Scoring" ~ "BDI & STAI",
      .$bdi == "Non-Scoring" & .$stai.y2 == "Non-Scoring" ~ "Non-Scoring"
    ),
    #  4-level composite across both STAI forms: "BDI Only" requires BOTH
    #  STAI forms non-scoring; "STAI Only"/"BDI & STAI" fire on EITHER form
    mood.state.4 = case_when(
      .$bdi == "Scoring" & (.$stai.y1 == "Non-Scoring" & .$stai.y2 == "Non-Scoring") ~ "BDI Only",
      .$bdi == "Non-Scoring" & (.$stai.y1 == "Scoring" | .$stai.y2 == "Scoring") ~ "STAI Only",
      .$bdi == "Scoring" & (.$stai.y1 == "Scoring" | .$stai.y2 == "Scoring") ~ "BDI & STAI",
      .$bdi == "Non-Scoring" & (.$stai.y1 == "Non-Scoring" & .$stai.y2 == "Non-Scoring") ~ "Non-Scoring"
    ),
    #  Full 8-level cross of the three dichotomies
    mood.state.8 = case_when(
      .$bdi == "Scoring" & .$stai.y1 == "Non-Scoring" & .$stai.y2 == "Non-Scoring" ~ "BDI Only",
      .$bdi == "Non-Scoring" & .$stai.y1 == "Scoring" & .$stai.y2 == "Non-Scoring" ~ "STAI Y1 Only",
      .$bdi == "Non-Scoring" & .$stai.y1 == "Non-Scoring" & .$stai.y2 == "Scoring" ~ "STAI Y2 Only",
      .$bdi == "Scoring" & .$stai.y1 == "Scoring" & .$stai.y2 == "Non-Scoring" ~ "BDI & STAI Y1",
      .$bdi == "Scoring" & .$stai.y1 == "Non-Scoring" & .$stai.y2 == "Scoring" ~ "BDI & STAI Y2",
      .$bdi == "Non-Scoring" & .$stai.y1 == "Scoring" & .$stai.y2 == "Scoring" ~ "STAI Y1 & STAI Y2",
      .$bdi == "Scoring" & .$stai.y1 == "Scoring" & .$stai.y2 == "Scoring" ~ "BDI & STAI Y1 & STAI Y2",
      .$bdi == "Non-Scoring" & .$stai.y1 == "Non-Scoring" & .$stai.y2 == "Non-Scoring" ~ "Non-Scoring"
    )
  )

#  Get mood state counts & proportions
#  Positional selection: 1 = Session.ID, 63:75 / 66:75 = the derived total
#  and grouping columns created above -- fragile if the inventory sheet
#  changes; TODO confirm positions
paged_table(inventories[, c(1, 63:75)])
sapply(inventories[, 66:75], FUN = table)
## $bdi
## 
## Non-Scoring 
##          48 
##     Scoring 
##          23 
## 
## $stai.y1
## 
## Non-Scoring 
##          54 
##     Scoring 
##          17 
## 
## $stai.y2
## 
## Non-Scoring 
##          42 
##     Scoring 
##          29 
## 
## $mood.state.2
## 
## Non-Scoring 
##          31 
##     Scoring 
##          40 
## 
## $mood.state.2.y1
## 
## Non-Scoring 
##          40 
##     Scoring 
##          31 
## 
## $mood.state.2.y2
## 
## Non-Scoring 
##          36 
##     Scoring 
##          35 
## 
## $mood.state.4.y1
## 
##  BDI & STAI 
##           9 
##    BDI Only 
##          14 
## Non-Scoring 
##          40 
##   STAI Only 
##           8 
## 
## $mood.state.4.y2
## 
##  BDI & STAI 
##          17 
##    BDI Only 
##           6 
## Non-Scoring 
##          36 
##   STAI Only 
##          12 
## 
## $mood.state.4
## 
##  BDI & STAI 
##          17 
##    BDI Only 
##           6 
## Non-Scoring 
##          31 
##   STAI Only 
##          17 
## 
## $mood.state.8
## 
## BDI & STAI Y1 & STAI Y2 
##                       9 
##           BDI & STAI Y2 
##                       8 
##                BDI Only 
##                       6 
##             Non-Scoring 
##                      31 
##       STAI Y1 & STAI Y2 
##                       3 
##            STAI Y1 Only 
##                       5 
##            STAI Y2 Only 
##                       9
#  Proportions for each of the frequency tables above (a list of tables)
sapply(sapply(inventories[, 66:75], FUN = table), FUN = prop.table)
## $bdi
## 
## Non-Scoring 
##   0.6760563 
##     Scoring 
##   0.3239437 
## 
## $stai.y1
## 
## Non-Scoring 
##   0.7605634 
##     Scoring 
##   0.2394366 
## 
## $stai.y2
## 
## Non-Scoring 
##   0.5915493 
##     Scoring 
##   0.4084507 
## 
## $mood.state.2
## 
## Non-Scoring 
##   0.4366197 
##     Scoring 
##   0.5633803 
## 
## $mood.state.2.y1
## 
## Non-Scoring 
##   0.5633803 
##     Scoring 
##   0.4366197 
## 
## $mood.state.2.y2
## 
## Non-Scoring 
##   0.5070423 
##     Scoring 
##   0.4929577 
## 
## $mood.state.4.y1
## 
##  BDI & STAI 
##   0.1267606 
##    BDI Only 
##   0.1971831 
## Non-Scoring 
##   0.5633803 
##   STAI Only 
##   0.1126761 
## 
## $mood.state.4.y2
## 
##  BDI & STAI 
##  0.23943662 
##    BDI Only 
##  0.08450704 
## Non-Scoring 
##  0.50704225 
##   STAI Only 
##  0.16901408 
## 
## $mood.state.4
## 
##  BDI & STAI 
##  0.23943662 
##    BDI Only 
##  0.08450704 
## Non-Scoring 
##  0.43661972 
##   STAI Only 
##  0.23943662 
## 
## $mood.state.8
## 
## BDI & STAI Y1 & STAI Y2 
##              0.12676056 
##           BDI & STAI Y2 
##              0.11267606 
##                BDI Only 
##              0.08450704 
##             Non-Scoring 
##              0.43661972 
##       STAI Y1 & STAI Y2 
##              0.04225352 
##            STAI Y1 Only 
##              0.07042254 
##            STAI Y2 Only 
##              0.12676056

4 Import SAM Ratings

#  Import ANEW ratings----
#  Sheet 6 of the stimuli workbook: a word column, 13 numeric norm columns,
#  and a trailing text column -- TODO confirm the sheet layout
anew.elp.full <- read_xlsx("./rawdata/stimuli worksheet.xlsx",
  sheet = 6,
  col_names = TRUE,
  col_types = c("text", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "text"),
  trim_ws = TRUE
)
#  Keep selected columns by position (fragile if the sheet changes) and
#  rename the word column to match the trial tables
anew.elp <- rename(anew.elp.full[, c(1, 2, 3, 4, 5, 6, 10, 13, 14)], Target = Word)
paged_table(anew.elp)
#  SAM lists----
#  Read a one-column, headerless word list into a plain vector.
read.sam.list <- function(path) {
  as.vector(read.csv(file = path, header = FALSE)[, 1])
}
#  Build the interleaved "<word> Valence"/"<word> Arousal" column names in
#  the order the Qualtrics export stores them (rbind + as.vector alternates
#  the two rows column-by-column). Each list is expected to hold 80 words;
#  the original hard-coded rep(..., 80), which paste() recycling makes
#  unnecessary.
sam.rating.cols <- function(words) {
  as.vector(rbind(paste(words, "Valence"), paste(words, "Arousal")))
}
pilot.sam.list <- read.sam.list("./rawdata/Pilot SAM List.txt")
pos.sam.list <- read.sam.list("./rawdata/Positive SAM List.txt")
neg.sam.list <- read.sam.list("./rawdata/Negative SAM List.txt")
pilot.sam.cols <- sam.rating.cols(pilot.sam.list)
pos.sam.cols <- sam.rating.cols(pos.sam.list)
neg.sam.cols <- sam.rating.cols(neg.sam.list)


#  SAM raw data import----
qual.file <- "./rawdata/Qualtrics data.csv"
#  skip = 2 drops the extra Qualtrics header rows but also discards the
#  column names, so the file is read a second time (header only) inside
#  col.names purely to recover them
sam.raw <- read.csv(
  file = qual.file,
  header = TRUE, skip = 2, colClasses = c(
    rep("character", 4),
    rep("numeric", 2),
    rep("character", 12),
    rep("numeric", 160),
    "character",
    "numeric",
    rep("character", 3),
    rep("numeric", 160),
    rep("numeric", 160)
  ),
  col.names = as.vector(colnames(read.csv(
    file = qual.file,
    header = TRUE
  ))), na.strings = ""
)
#  Reassigning some group names that recorded in a different form from the
#  rest: Qualtrics stored numeric codes 3/4 for the PA/PB groups --
#  TODO confirm the code-to-group mapping
sam.raw$Q1.7[sam.raw$Q1.7 == 3] <- "PA"
sam.raw$Q1.7[sam.raw$Q1.7 == 4] <- "PB"

#  Pull one group's SAM ratings out of the raw Qualtrics export. Keeps the
#  demographic columns (positions 9 and 18 plus the Q7.* demographics),
#  selects that group's rating columns by question prefix, applies standard
#  names, and flips the reverse-recorded 1-9 scale (10 - x). Replaces three
#  near-identical copy-pasted blocks.
#  NOTE(review): using %in% (rather than chained ==) also DROPS rows whose
#  Q1.7 is NA; the original comparisons would have kept them as all-NA rows.
extract.sam <- function(raw, groups, q.pattern, word.cols) {
  rows <- raw$Q1.7 %in% groups
  block <- raw[rows, c(9, 18, grep("Q7", colnames(raw)), grep(q.pattern, colnames(raw)))]
  colnames(block) <- c("QID", "Group", "PCode", "Age", "Gender", "Handedness", "DHOH", word.cols)
  #  Columns 8:167 are the 160 rating columns; flip the recorded scale
  block[, 8:167] <- 10 - block[, 8:167]
  block
}

#  SAM ratings from PilotA and PilotB groups
sam.pilot <- extract.sam(sam.raw, c("Pilot A", "Pilot B"), "Q2", pilot.sam.cols)

#  SAM ratings from PA and PB groups
sam.pos <- extract.sam(sam.raw, c("PA", "PB"), "Q3", pos.sam.cols)

#  SAM ratings from NA and NB groups
sam.neg <- extract.sam(sam.raw, c("NA", "NB"), "Q4", neg.sam.cols)

#  Combined SAM rating table with all groups
sam <- bind_rows(sam.pilot, sam.pos, sam.neg)

#  Fix column names and demographic responses
#  Free-text demographic responses observed in the data, grouped into the
#  levels used below
f <- c("female", "Female", "Woman")
m <- c("male", "Male")
dhoh <- c("hard of hearing", "Deaf")
#  NOTE(review): ndhoh is defined but never used -- everything not in `dhoh`
#  (including NA responses) is coded "Hearing" below; confirm intended
ndhoh <- c("n/a", "Neither", "no", "No", "NO", "hearing", "hearing ", "None", "none")

#  NOTE(review): this mutate() mixes `.$Session.ID` (the piped copy) with
#  `sam$Gender` / `sam$DHOH` (the global, pre-pipeline table). It works
#  because `sam` is only reassigned after the pipeline finishes, and the
#  rename of DHOH -> Hearing.Status applies to the piped copy, not the
#  global -- but it is fragile. NA gender responses end up "Non-Binary"
#  because %in% returns FALSE for NA.
sam <- sam %>%
  rename(., Session.ID = PCode, Hearing.Status = DHOH) %>%
  mutate(.,
    Session.ID = fix.id(.$Session.ID),
    Gender = ifelse(sam$Gender %in% m, "Male", ifelse(sam$Gender %in% f, "Female", "Non-Binary")),
    Hearing.Status = ifelse(sam$DHOH %in% dhoh, "Deaf/Hard-of-Hearing", "Hearing")
  )
paged_table(sam)

5 Import SuperLab Data

#  Read column names----
#  The SuperLab header is spread over two rows (file rows 4-5); read them
#  ONCE (the original read the file twice), paste the two halves of each
#  name together, strip the "NA" placeholders left by empty cells, then keep
#  entries 1:9 and 11:33 (entry 10 is the spurious column dropped in
#  read.sl()).
#  NOTE(review): the paste yields 32 names, so index 33 produces a trailing
#  NA name -- behavior preserved from the original; confirm it is harmless.
#  gsub("NA", ...) would also strip a literal "NA" inside a real name.
sl.header <- read.delim(
  file = "./rawdata/timingtest2.txt", nrows = 2, skip = 3,
  header = FALSE, na.strings = "", colClasses = rep("character", 32)
)
sl.cols <- gsub("NA", "", paste(sl.header[1, ], sl.header[2, ]))[c(1:9, 11:33)]

#  define column classes, list of superlab files, and session IDs
sl.class <- c(rep("character", 4), "numeric", "character", "numeric", "character", "numeric", "numeric", rep("character", 8), "numeric", rep("character", 14))
#  Data files are named with four digits, e.g. "0401.txt"; list the raw
#  directory once (the original called list.files() twice) and build paths
rawdata.files <- list.files("./rawdata")
sl.files <- paste0("./rawdata/", rawdata.files[grep("\\d{4}\\.txt", rawdata.files)])
#  fixed = TRUE: the original pattern ".txt" treated the dot as a regex
#  wildcard; match the literal extension instead
sl.names <- gsub(".txt", "", sl.files, fixed = TRUE)

#  Read in all files and combine into a single table, keeping only the
#  columns used downstream (positional selection -- TODO confirm positions)
sl.l <- lapply(sl.files, FUN = read.sl)
sl.raw <- bind_rows(sl.l)[c(1:11, 16:18, 25:32)]

paged_table(sl.raw)

6 SuperLab Data Formatting and Variable Creation

#  Build the trial-level SuperLab table. Each trial is assumed to emit three
#  event rows in order (target, pair 1, pair 2); only the pair-2 rows are
#  kept, carrying the whole trial's words via lag().
sl.base <- sl.raw %>%
  mutate(., #  Fix some participant group names and session IDs that were mis-input
    Participant.Group = ifelse(.$Participant.Group == "NA", "n_a", ifelse(.$Participant.Group == "NB", "n_b", tolower(.$Participant.Group))),
    Session.ID = fix.id(.$Session.ID),
    Participant.Name = .$Session.ID
  ) %>%
  filter(
    ., #  Remove unnecessary RT entries
    .$Event.Name != "Fixation",
    .$Event.Name != "Fixation Cross 1s7",
    .$Event.Name != "Start Screen"
  ) %>%
  mutate(., #  Extract words from each trial and assign them to respective place in trial (target, pair1, pair2)
    #  Take everything after the first ", " in Event.Name and drop the final
    #  character (substr start of 0 behaves as 1) -- presumably trailing
    #  punctuation; TODO confirm Event.Name format
    Word = substr(gsub("^.*?, ", "", .$Event.Name), 0, nchar(gsub("^.*?, ", "", .$Event.Name)) - 1)
  ) %>%
  mutate(
    #  Only meaningful on pair-2 rows, where lag(2)/lag(1) land inside the
    #  same trial; those are the rows retained by the "^Pair 2" filter below
    Target = lag(.$Word, 2),
    Pair1 = lag(.$Word),
    Pair2 = .$Word
  ) %>%
  mutate(., #  Get unique ID for each trial and valence/wt for all stimuli words
    Trial.ID = paste0(.$Target, "...", .$Pair1, "...", .$Pair2),
    Target.V = get.val(.$Target),
    Pair1.V = get.val(.$Pair1),
    Pair2.V = get.val(.$Pair2),
    Target.WT = get.wt(.$Target),
    Pair1.WT = get.wt(.$Pair1),
    Pair2.WT = get.wt(.$Pair2)
  ) %>%
  mutate(., #  Get valence and WT of stimuli from previous trial
    #  NOTE(review): these lag(1) calls step one EVENT row, not one trial
    #  (three rows), and they run BEFORE the "^Pair 2" filter. On the kept
    #  pair-2 rows, lag(Target.V) is the pair-1 row's Target value -- i.e.
    #  the previous trial's PAIR-2 word -- while lag(Pair1.V)/lag(Pair2.V)
    #  resolve to the CURRENT trial's target and pair-1 words. If "previous
    #  trial" is intended, lag(..., 3) here (or lag(1) after the filter)
    #  appears necessary. Confirm before relying on these columns.
    Previous.Target.V = lag(.$Target.V),
    Previous.Pair1.V = lag(.$Pair1.V),
    Previous.Pair2.V = lag(.$Pair2.V),
    Previous.Target.WT = lag(.$Target.WT),
    Previous.Pair1.WT = lag(.$Pair1.WT),
    Previous.Pair2.WT = lag(.$Pair2.WT)
  ) %>%
  filter(
    ., #  Remove unnecessary RT entries and DHOH participants
    str_detect(.$Event.Name, "^Pair 2")
  ) %>%
  mutate(., #  Adjusting RTs based on SuperLab RTs starting from priming/satiation, not task
    #  Offsets: satiation display 23830 ms + 1070 ms lead-in; priming
    #  3012 ms + 1070 ms -- TODO confirm against the experiment script
    Reaction.Time = case_when(
      .$X.Primed.Satiated == "Satiated" & .$Reaction.Time >= 25000 ~ .$Reaction.Time - (23830 + 1070),
      .$X.Primed.Satiated == "Satiated" & .$Reaction.Time < 25000 ~ .$Reaction.Time,
      .$X.Primed.Satiated == "Primed" & .$Reaction.Time >= 3000 ~ .$Reaction.Time - (3012 + 1070),
      .$X.Primed.Satiated == "Primed" & .$Reaction.Time < 3000 ~ .$Reaction.Time
    )
  ) %>%
  mutate(., Reaction.Time = ifelse(.$Reaction.Time == 0, NA, .$Reaction.Time)) %>% #  Recode 0s to NAs for no response trials
  filter(., .$Reaction.Time >= 500 | is.na(.$Reaction.Time)) #  Remove reaction times less than 500ms, leaving NAs for no responses

7 Merging SuperLab, SAM, and Inventory Data & Defining Pipeline to Format and Derive Variables from sl.merged

#  Attach questionnaire scores and SAM ratings to every SuperLab trial row,
#  keyed on Session.ID, then drop practice-block trials.
sl.merged <- sl.base %>%
  merge(select(inventories, c(Session.ID, bdi.tot:mood.state.8)), by = "Session.ID") %>% #  merge with inventory data
  merge(select(sam, c(Session.ID:"scared Valence")), by = "Session.ID") %>% #  merge with SAM data
  filter(Block.Name != "Practice") #  remove all practice trials

#  Shared formatting/derivation pipeline applied to sl.merged (and subsets).
#  Recomputes response correctness, derives pair/target congruency
#  variables, and removes DHOH participants and misrecorded trials.
#  NOTE: inside each mutate(), `.$col` refers to the data as it entered that
#  mutate() -- the staging into successive mutate() calls is load-bearing.
#  In particular, RTCorrect/RTError (first mutate) use the ORIGINAL
#  SuperLab-recorded Error.Code, while Error.Code itself is re-derived two
#  mutates later from the recomputed Correct.Response, so the two can
#  disagree. NOTE(review): confirm that ordering is intended.
sl.pipe <- function(sldf) {
  sldf %>%
    select(., c(Session.ID, Participant.Group, Block.Name, Participant.Response, Correct.Response, Error.Code, Reaction.Time, X.Primed.Satiated, Target:Hearing.Status)) %>% #  keep the relevant columns
    mutate(.,
      RTCorrect = ifelse(.$Error.Code == "C", .$Reaction.Time, NA), #  Only correct RT
      RTError = ifelse(.$Error.Code == "E", .$Reaction.Time, NA), #  only incorrect RT
      Word.Type.n = ifelse(.$Target.WT == "Neutral", NA, .$Target.WT), #  WT excluding neutral
      Valence.n = ifelse(.$Target.V == "Neutral", NA, .$Target.V), #  Valence excluding neutral
      Pair.MM = ifelse(.$Pair1.V == .$Pair2.V, "Match", "Mismatch"), #  valence relationship between pair1 and pair2
      Pair.WT = case_when( #  word type combination of each pair, including order
        .$Pair1.WT == "Emotion-Label" & .$Pair2.WT == "Emotion-Label" ~ "Emotion-Label",
        .$Pair1.WT == "Emotion-Laden" & .$Pair2.WT == "Emotion-Laden" ~ "Emotion-Laden",
        .$Pair1.WT == "Neutral" & .$Pair2.WT == "Neutral" ~ "Neutral",
        .$Pair1.WT == "Emotion-Label" & .$Pair2.WT == "Emotion-Laden" ~ "Mixed E.EL",
        .$Pair1.WT == "Emotion-Laden" & .$Pair2.WT == "Emotion-Label" ~ "Mixed EL.E",
        .$Pair1.WT == "Emotion-Label" & .$Pair2.WT == "Neutral" ~ "Mixed E.NEU",
        .$Pair1.WT == "Neutral" & .$Pair2.WT == "Emotion-Label" ~ "Mixed NEU.E",
        .$Pair1.WT == "Neutral" & .$Pair2.WT == "Emotion-Laden" ~ "Mixed NEU.EL",
        .$Pair1.WT == "Emotion-Laden" & .$Pair2.WT == "Neutral" ~ "Mixed EL.NEU"
      )
    ) %>%
    #  Re-derive the correct response from the pair valences (separate
    #  mutate so the next step sees the NEW value via .$Correct.Response)
    mutate(.,
      Correct.Response = ifelse(.$Pair1.V == .$Pair2.V, "Match", "Mismatch"),
    ) %>%
    #  Re-score each trial: correct, no-response (empty), or error
    mutate(.,
      Error.Code = ifelse(.$Correct.Response == .$Participant.Response, "C", ifelse(.$Participant.Response == "", "NR", "E"))
    ) %>%
    mutate(.,
      Pair.WT.n = ifelse(.$Pair.WT == "Neutral", NA, .$Pair.WT), #  pair wt excluding neutral
      TPRValence = case_when( #  relationship between target and pair word valences
        .$Target.V == .$Pair1.V & .$Target.V == .$Pair2.V ~ "Congruent",
        .$Target.V == .$Pair1.V & .$Target.V != .$Pair2.V ~ "Pair 1 Congruent",
        .$Target.V != .$Pair1.V & .$Target.V == .$Pair2.V ~ "Pair 2 Congruent",
        .$Target.V != .$Pair1.V & .$Target.V != .$Pair2.V ~ "Incongruent"
      ),
      TPRWT = case_when( #  relationship between target and pair word word types
        .$Target.WT == .$Pair1.WT & .$Target.WT == .$Pair2.WT ~ "Congruent",
        .$Target.WT == .$Pair1.WT & .$Target.WT != .$Pair2.WT ~ "Pair 1 Congruent",
        .$Target.WT != .$Pair1.WT & .$Target.WT == .$Pair2.WT ~ "Pair 2 Congruent",
        .$Target.WT != .$Pair1.WT & .$Target.WT != .$Pair2.WT ~ "Incongruent"
      )
    ) %>%
    mutate(.,
      Valence.Congruency = paste0(.$Target.V, ".", .$TPRValence), #  code target valence and valence congruency as one variable
      Word.Type.Congruency = paste0(.$Target.WT, ".", .$TPRWT), #  code target wt and wt congruency as one variable
      Primed.Satiated.MM = paste0(.$X.Primed.Satiated, ".", .$Pair.MM), #  code priming/satiation and task match/mismatch together
      RTC.CongruentV = ifelse(.$Error.Code == "C" & .$TPRValence %in% c("Congruent", "Pair 1 Congruent", "Pair 2 Congruent"), .$Reaction.Time, NA), #  rts from valence congruent trials with correct responses
      RTA.CongruentV = ifelse(.$TPRValence %in% c("Congruent", "Pair 1 Congruent", "Pair 2 Congruent"), .$Reaction.Time, NA) #  all rts from valence congruent trials
    ) %>%
    mutate(.,
      Valence.Congruency.n = ifelse(.$Target.V == "Neutral", NA, .$Valence.Congruency), #  valence congruency excluding neutral targets
      Word.Type.Congruency.n = ifelse(.$Target.WT == "Neutral", NA, .$Word.Type.Congruency) #  wt congruency excluding neutral targets
    ) %>%
    mutate(., Valence.Congruency.Congruent = ifelse(.$TPRValence %in% c("Congruent", "Pair 1 Congruent", "Pair 2 Congruent"), .$Valence.Congruency, NA)) %>% #  valence congruency with only congruent trials
    #  NOTE(review): this ".n.Congruent" filter keys on Target.WT, while the
    #  plain ".n" variable above keys on Target.V -- confirm intended
    mutate(., Valence.Congruency.n.Congruent = ifelse(.$Target.WT == "Neutral", NA, .$Valence.Congruency.Congruent)) %>% #  valence congruency, only congruent and excluding neutral targets
    filter(., .$Hearing.Status == "Hearing") %>% #  get rid of DHOH participants, they differed significantly from the rest in terms of mean RT
    remove.problem.trials(.) %>% #  get rid of misrecorded trials
    return(.)
}

8 SAM Descriptives

#  Demographics Descriptives
#  psych::describe() summary of participant age
formattable(describe(sam$Age))
## vars n mean sd median trimmed mad min max range skew kurtosis se
## X1 1 77 19.42857 1.922365 19 19.11111 1.4826 18 30 12 2.613894 10.33231 0.2190739
formattable(table(sam$Gender))
## 
##     Female 
##         45 
##       Male 
##         31 
## Non-Binary 
##          1
formattable(table(sam$Handedness))
## 
##  Left-handed 
##            8 
## Right-handed 
##           69
formattable(table(sam$Hearing.Status))
## 
## Deaf/Hard-of-Hearing 
##                    2 
##              Hearing 
##                   75
#  SAM Valence & Arousal Descriptives, Comparisons & Plots
#  Per-column descriptives; rownames are the "<word> Valence" /
#  "<word> Arousal" column names, which the filters below rely on
sam.desc <- describe(sam)

#  Valence rows only: rename `vars` to Target, then overwrite it with the
#  word extracted from the matching rowname. NOTE(review): this relies on
#  the filtered rows and the rowname-derived words sharing order -- they do
#  as long as both come from the same sam.desc; fragile if that changes.
sam.desc.val <- sam.desc %>%
  filter(., str_detect(rownames(.), "Valence$")) %>%
  rename(., Target = "vars") %>%
  mutate(., Target = gsub(" Valence", "", rownames(sam.desc)[str_detect(rownames(sam.desc), "Valence$")]))
#  Compare this sample's mean valence ratings with the ANEW norms per word
sam.comp.val <- sam.desc.val %>%
  merge(., select(anew.elp, c(Target, "Valence Mean", "Valence SD")), by = "Target") %>%
  rename(., "Rated.Mean" = mean, "ANEW.Mean" = "Valence Mean", "ANEW.SD" = "Valence SD") %>%
  mutate(., Difference = .$"Rated.Mean" - .$"ANEW.Mean")
plot_histogram(sam.comp.val$Difference)

#  Same construction for arousal
sam.desc.ar <- sam.desc %>%
  filter(., str_detect(rownames(.), "Arousal$")) %>%
  rename(., Target = "vars") %>%
  mutate(., Target = gsub(" Arousal", "", rownames(sam.desc)[str_detect(rownames(sam.desc), "Arousal$")]))
sam.comp.ar <- sam.desc.ar %>%
  merge(., select(anew.elp, c(Target, "Arousal Mean", "Arousal SD")), by = "Target") %>%
  rename(., "Rated.Mean" = mean, "ANEW.Mean" = "Arousal Mean", "ANEW.SD" = "Arousal SD") %>%
  mutate(., Difference = .$"Rated.Mean" - .$"ANEW.Mean")
plot_histogram(sam.comp.ar$Difference)

paged_table(sam.desc)
#  arousal differences----
#  Range checks: rated arousal vs ANEW norms, then flag words whose rated
#  arousal deviates from the ANEW norm by more than 2 scale points
range(sam.comp.ar$"ANEW.Mean")
## [1] 3.14
## [2] 8.17
range(sam.comp.ar$"Rated.Mean")
## [1] 3.114286
## [2] 7.523810
range(sam.comp.ar$Difference)
## [1] -3.153333
## [2]  1.188571
ar.diffs <- subset(sam.comp.ar, abs(sam.comp.ar$Difference) > 2)
paged_table(ar.diffs)
#  collapse SAM ratings by participant----
sam.cat <- sam
#  Valence category of each rating column's word (columns 8:247 are the
#  rating columns -- TODO confirm count). Stripping " Valence" leaves the
#  arousal columns named "<word> Arousal", which get.val maps to "None", so
#  the masks below select VALENCE columns only. Subtle but appears
#  intentional; confirm.
sam.cat.col.val <- get.val((gsub(" Valence", "", colnames(sam.cat))[8:247]))

#  Per-participant mean rated valence within each word-valence category
sam.cat$pos.val <- rowMeans(sam.cat[, 8:247][sam.cat.col.val == "Positive"])
sam.cat$neg.val <- rowMeans(sam.cat[, 8:247][sam.cat.col.val == "Negative"])
sam.cat$neu.val <- rowMeans(sam.cat[, 8:247][sam.cat.col.val == "Neutral"])

sam.cat <- select(sam.cat, c(Group:Hearing.Status, pos.val:neu.val))

#confirmatory word difference testing
#  Welch t-tests on per-word mean AROUSAL across valence categories
#  (stimulus-matching check: positive vs negative arousal should not differ)
t.test(sam.desc.ar$mean[get.val(sam.desc.ar$Target)=="Positive"],
       sam.desc.ar$mean[get.val(sam.desc.ar$Target)=="Negative"])
## 
##  Welch
##  Two
##  Sample
##  t-test
## 
## data:  sam.desc.ar$mean[get.val(sam.desc.ar$Target) == "Positive"] and sam.desc.ar$mean[get.val(sam.desc.ar$Target) == "Negative"]
## t =
## 1.4274,
## df =
## 77.765,
## p-value
## = 0.1575
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -0.1350464  0.8190940
## sample estimates:
## mean of x 
##  5.243452 
## mean of y 
##  4.901429
#  Arousal: positive vs neutral words
t.test(sam.desc.ar$mean[get.val(sam.desc.ar$Target)=="Positive"],
       sam.desc.ar$mean[get.val(sam.desc.ar$Target)=="Neutral"])
## 
##  Welch
##  Two
##  Sample
##  t-test
## 
## data:  sam.desc.ar$mean[get.val(sam.desc.ar$Target) == "Positive"] and sam.desc.ar$mean[get.val(sam.desc.ar$Target) == "Neutral"]
## t =
## 3.9478,
## df =
## 69.009,
## p-value
## =
## 0.0001875
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  0.3899289 1.1865862
## sample estimates:
## mean of x 
##  5.243452 
## mean of y 
##  4.455195
#  Arousal: neutral vs negative words
t.test(sam.desc.ar$mean[get.val(sam.desc.ar$Target)=="Neutral"],
       sam.desc.ar$mean[get.val(sam.desc.ar$Target)=="Negative"])
## 
##  Welch
##  Two
##  Sample
##  t-test
## 
## data:  sam.desc.ar$mean[get.val(sam.desc.ar$Target) == "Neutral"] and sam.desc.ar$mean[get.val(sam.desc.ar$Target) == "Negative"]
## t =
## -2.1512,
## df =
## 66.874,
## p-value
## =
## 0.03507
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -0.86028181 -0.03218572
## sample estimates:
## mean of x 
##  4.455195 
## mean of y 
##  4.901429
#  Valence: positive vs negative words
#  NOTE(review): indexes sam.desc.val$mean with categories derived from
#  sam.desc.ar$Target -- correct only if sam.desc.val and sam.desc.ar share
#  row order (both filtered from the same describe() table, so they likely
#  do). Probably meant sam.desc.val$Target; confirm.
t.test(sam.desc.val$mean[get.val(sam.desc.ar$Target)=="Positive"],
       sam.desc.val$mean[get.val(sam.desc.ar$Target)=="Negative"])
## 
##  Welch
##  Two
##  Sample
##  t-test
## 
## data:  sam.desc.val$mean[get.val(sam.desc.ar$Target) == "Positive"] and sam.desc.val$mean[get.val(sam.desc.ar$Target) == "Negative"]
## t =
## 21.102,
## df =
## 75.767,
## p-value
## <
## 2.2e-16
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  4.073098 4.922140
## sample estimates:
## mean of x 
##  7.151190 
## mean of y 
##  2.653571
#  Valence: positive vs neutral words
#  NOTE(review): categories come from sam.desc.ar$Target (see note above at
#  the positive-vs-negative test); probably meant sam.desc.val$Target
t.test(sam.desc.val$mean[get.val(sam.desc.ar$Target)=="Positive"],
       sam.desc.val$mean[get.val(sam.desc.ar$Target)=="Neutral"])
## 
##  Welch
##  Two
##  Sample
##  t-test
## 
## data:  sam.desc.val$mean[get.val(sam.desc.ar$Target) == "Positive"] and sam.desc.val$mean[get.val(sam.desc.ar$Target) == "Neutral"]
## t =
## 11.852,
## df =
## 77.446,
## p-value
## <
## 2.2e-16
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  2.184459 3.066623
## sample estimates:
## mean of x 
##  7.151190 
## mean of y 
##  4.525649
#  Valence: neutral vs negative words
#  NOTE(review): categories come from sam.desc.ar$Target (see earlier note);
#  probably meant sam.desc.val$Target
t.test(sam.desc.val$mean[get.val(sam.desc.ar$Target)=="Neutral"],
       sam.desc.val$mean[get.val(sam.desc.ar$Target)=="Negative"])
## 
##  Welch
##  Two
##  Sample
##  t-test
## 
## data:  sam.desc.val$mean[get.val(sam.desc.ar$Target) == "Neutral"] and sam.desc.val$mean[get.val(sam.desc.ar$Target) == "Negative"]
## t =
## 9.2145,
## df =
## 77.395,
## p-value
## =
## 4.431e-14
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  1.467554 2.276602
## sample estimates:
## mean of x 
##  4.525649 
## mean of y 
##  2.653571

9 SAM vs ANEW Categorical Comparisons

#  Function to return the categorical valence of a given valence rating
#  Maps a numeric SAM valence rating to a category:
#    (6, 10) exclusive -> "Positive", (0, 4) exclusive -> "Negative",
#    [4, 6] inclusive  -> "Neutral",  anything else -> "None".
#  NA ratings stay NA. Vectorized. Base-R comparisons replace
#  data.table::between (incbounds = FALSE was strict, TRUE inclusive) with
#  identical bounds, removing the data.table dependency for this helper.
get.val.cat <- function(x) {
  valence <- ifelse(x > 6 & x < 10, "Positive",
    ifelse(x > 0 & x < 4, "Negative",
      ifelse(x >= 4 & x <= 6, "Neutral", "None")
    )
  )
  return(valence)
}

#  Comparing valence categories based on SAM averages
sam.cat.comp.val <- sam.comp.val %>%
  mutate(.,
    #  Category from this sample's mean rating vs category from ANEW norms
    Rated.Category = get.val.cat(.$Rated.Mean),
    ANEW.Category = get.val(.$Target),
    Target.Word.Type = get.wt(.$Target)
  ) %>%
  mutate(.,
    Category.Comparison = ifelse(.$ANEW.Category == .$Rated.Category, "Same", "Different")
  ) %>%
  mutate(.,
    #  Reclassify word type where the sample's ratings disagree with ANEW:
    #  ANEW-neutral words rated emotional become Emotion-Laden; emotional
    #  words rated neutral become Neutral; otherwise keep the original type
    Rated.Word.Type = case_when(
      .$ANEW.Category == "Neutral" & .$Rated.Category == "Negative" ~ "Emotion-Laden",
      .$ANEW.Category == "Neutral" & .$Rated.Category == "Positive" ~ "Emotion-Laden",
      .$ANEW.Category != "Neutral" & .$Rated.Category == "Neutral" ~ "Neutral",
      TRUE ~ .$Target.Word.Type
    )
  )
paged_table(sam.cat.comp.val)
#  Isolate differences & Graph----

#  Words whose rated category disagrees with their ANEW category
cat.comp.mismatches <- sam.cat.comp.val %>%
  select(., c(Target:Rated.Mean, ANEW.Mean, Difference:Rated.Word.Type)) %>%
  filter(., .$Category.Comparison == "Different")
paged_table(cat.comp.mismatches)
#  Mismatched words by category
unique(cat.comp.mismatches$Target[cat.comp.mismatches$ANEW.Category == "Positive" & cat.comp.mismatches$Rated.Category == "Neutral"])
## [1] "baby"  
## [2] "circus"
unique(cat.comp.mismatches$Target[cat.comp.mismatches$ANEW.Category == "Negative" & cat.comp.mismatches$Rated.Category == "Neutral"])
## [1] "alone"  
## [2] "burial" 
## [3] "tragedy"
unique(cat.comp.mismatches$Target[cat.comp.mismatches$ANEW.Category == "Neutral" & cat.comp.mismatches$Rated.Category == "Negative"])
##  [1] "alley"  
##  [2] "beast"  
##  [3] "clumsy" 
##  [4] "cold"   
##  [5] "hide"   
##  [6] "hit"    
##  [7] "kick"   
##  [8] "noisy"  
##  [9] "obscene"
## [10] "rancid" 
## [11] "revolt" 
## [12] "rough"  
## [13] "tamper"
#  Histograms
plot_histogram(cat.comp.mismatches$Difference)

plot_histogram(cat.comp.mismatches$Rated.Mean)

plot_histogram(cat.comp.mismatches$ANEW.Mean)

#  Tables
formattable(table(cat.comp.mismatches$Rated.Category))
## 
## Negative 
##       15 
##  Neutral 
##        5
formattable(table(cat.comp.mismatches$ANEW.Category))
## 
## Negative 
##        3 
##  Neutral 
##       13 
## Positive 
##        4
formattable(table(cat.comp.mismatches$Target.Word.Type))
## 
## Emotion-Label 
##             2 
## Emotion-Laden 
##             5 
##       Neutral 
##            13
formattable(table(cat.comp.mismatches$Rated.Word.Type))
## 
## Emotion-Label 
##             1 
## Emotion-Laden 
##            14 
##       Neutral 
##             5
formattable(table(cat.comp.mismatches[, c(6, 7)]))
##               ANEW.Category
## Rated.Category Negative
##       Negative  0      
##       Neutral   3      
##               ANEW.Category
## Rated.Category Neutral
##       Negative 13     
##       Neutral   0     
##               ANEW.Category
## Rated.Category Positive
##       Negative  2      
##       Neutral   2
formattable(table(cat.comp.mismatches[, c(8, 10)]))
##                 Rated.Word.Type
## Target.Word.Type Emotion-Label
##    Emotion-Label  1           
##    Emotion-Laden  0           
##    Neutral        0           
##                 Rated.Word.Type
## Target.Word.Type Emotion-Laden
##    Emotion-Label  0           
##    Emotion-Laden  1           
##    Neutral       13           
##                 Rated.Word.Type
## Target.Word.Type Neutral
##    Emotion-Label  1     
##    Emotion-Laden  4     
##    Neutral        0
#  Comparing valence categories based on individual ratings
#  Long format: one row per participant x word, using each INDIVIDUAL rating
#  (kept under the name Rated.Mean for symmetry with sam.cat.comp.val even
#  though it is not a mean here). reshape2::melt is superseded by
#  tidyr::pivot_longer but retained for identical behavior.
sam.val.ind <- sam %>%
  select(., Session.ID, ends_with("Valence")) %>%
  melt(.) %>%
  rename(.,
    Target = "variable",
    Rated.Mean = "value"
  ) %>%
  mutate(.,
    Target = gsub(" Valence", "", .$Target)
  ) %>%
  mutate(.,
    Rated.Category = get.val.cat(.$Rated.Mean),
    ANEW.Category = get.val(.$Target),
    Target.Word.Type = get.wt(.$Target)
  ) %>%
  mutate(.,
    Category.Comparison = ifelse(.$ANEW.Category == .$Rated.Category, "Same", "Different")
  ) %>%
  mutate(.,
    #  Reclassify word type where the individual rating disagrees with ANEW
    #  (same rules as sam.cat.comp.val above)
    Rated.Word.Type = case_when(
      .$ANEW.Category == "Neutral" & .$Rated.Category == "Negative" ~ "Emotion-Laden",
      .$ANEW.Category == "Neutral" & .$Rated.Category == "Positive" ~ "Emotion-Laden",
      .$ANEW.Category != "Neutral" & .$Rated.Category == "Neutral" ~ "Neutral",
      TRUE ~ .$Target.Word.Type
    )
  )
paged_table(sam.val.ind)
#  Individual ratings whose category disagrees with the word's ANEW category
cat.ind.mismatches <- sam.val.ind %>%
  filter(., .$Category.Comparison == "Different")
paged_table(cat.ind.mismatches)
#  Mismatched words by category
unique(cat.ind.mismatches$Target[cat.ind.mismatches$ANEW.Category == "Positive" & cat.ind.mismatches$Rated.Category == "Neutral"])
##  [1] "brave"  
##  [2] "snuggle"
##  [3] "luxury" 
##  [4] "desire" 
##  [5] "cute"   
##  [6] "freedom"
##  [7] "honest" 
##  [8] "jewel"  
##  [9] "victory"
## [10] "cheer"  
## [11] "circus" 
## [12] "fun"    
## [13] "hopeful"
## [14] "cake"   
## [15] "devoted"
## [16] "treat"  
## [17] "aroused"
## [18] "secure" 
## [19] "bride"  
## [20] "diamond"
## [21] "ecstasy"
## [22] "baby"   
## [23] "gift"   
## [24] "kind"   
## [25] "reward" 
## [26] "wealthy"
## [27] "puppy"  
## [28] "pride"  
## [29] "loyal"  
## [30] "comfort"
## [31] "respect"
## [32] "delight"
## [33] "hug"    
## [34] "triumph"
## [35] "kiss"   
## [36] "elated" 
## [37] "lust"
#  ANEW-negative words that at least one participant rated neutral
with(cat.ind.mismatches, unique(Target[ANEW.Category == "Negative" & Rated.Category == "Neutral"]))
##  [1] "anger"  
##  [2] "failure"
##  [3] "unhappy"
##  [4] "misery" 
##  [5] "traitor"
##  [6] "horror" 
##  [7] "rotten" 
##  [8] "death"  
##  [9] "danger" 
## [10] "fear"   
## [11] "tumor"  
## [12] "tragedy"
## [13] "pain"   
## [14] "victim" 
## [15] "devil"  
## [16] "annoy"  
## [17] "hell"   
## [18] "afraid" 
## [19] "ache"   
## [20] "alone"  
## [21] "injury" 
## [22] "poison" 
## [23] "selfish"
## [24] "burial" 
## [25] "scared" 
## [26] "coffin" 
## [27] "jail"   
## [28] "hurt"   
## [29] "abuse"  
## [30] "cancer" 
## [31] "divorce"
## [32] "mad"    
## [33] "rage"   
## [34] "lonely" 
## [35] "rude"   
## [36] "robber" 
## [37] "crisis" 
## [38] "grief"  
## [39] "trauma" 
## [40] "sad"
#  ANEW-neutral words that at least one participant rated negative
with(cat.ind.mismatches, unique(Target[ANEW.Category == "Neutral" & Rated.Category == "Negative"]))
##  [1] "hit"    
##  [2] "alien"  
##  [3] "cliff"  
##  [4] "rough"  
##  [5] "trumpet"
##  [6] "obscene"
##  [7] "body"   
##  [8] "alley"  
##  [9] "boxer"  
## [10] "hide"   
## [11] "vanity" 
## [12] "reptile"
## [13] "hard"   
## [14] "lion"   
## [15] "defiant"
## [16] "cold"   
## [17] "lump"   
## [18] "revolt" 
## [19] "dentist"
## [20] "tease"  
## [21] "army"   
## [22] "beast"  
## [23] "clumsy" 
## [24] "doctor" 
## [25] "highway"
## [26] "hotel"  
## [27] "kick"   
## [28] "mystic" 
## [29] "news"   
## [30] "noisy"  
## [31] "rancid" 
## [32] "storm"  
## [33] "swamp"  
## [34] "tamper" 
## [35] "tank"   
## [36] "truck"  
## [37] "vampire"
## [38] "volcano"
#  Tables
#  How many mismatched ratings each participant session produced
formattable(table(cat.ind.mismatches$Session.ID))
## 
##     na0415 
##         26 
##     na0416 
##         26 
##     na0425 
##         16 
##     na0903 
##         24 
##     na0910 
##         21 
##     na0917 
##         36 
##     na0926 
##         23 
##     na1003 
##         24 
##     na1010 
##         33 
##     na1017 
##         19 
##     na1022 
##         20 
##     na1023 
##         26 
##     na1024 
##         28 
##     na1028 
##         28 
##     na1029 
##         18 
##     na1030 
##         29 
##     na1031 
##         13 
##     nb0416 
##         19 
##     nb0418 
##         19 
##     nb0829 
##         20 
##     nb0905 
##         22 
##     nb0910 
##         18 
##     nb0917 
##         29 
##     nb1004 
##         23 
##     nb1008 
##         29 
##     nb1010 
##         18 
##     nb1017 
##         23 
##     nb1018 
##         28 
##     nb1022 
##         19 
##     nb1024 
##         26 
##     nb1028 
##         17 
##     nb1029 
##         40 
##     nb1030 
##         20 
##     nb1031 
##         12 
##     nb1032 
##         14 
##     pa0325 
##         23 
##     pa0329 
##         26 
##     pa0402 
##         15 
##     pb0326 
##         20 
##     pb0405 
##         25 
##     pb0830 
##         21 
## pilota0325 
##         32 
## pilota0329 
##         35 
## pilota0402 
##         32 
## pilota0829 
##         26 
## pilota0905 
##         30 
## pilota0912 
##         17 
## pilota0919 
##         27 
## pilota1003 
##         38 
## pilota1008 
##         29 
## pilota1010 
##         32 
## pilota1022 
##         29 
## pilota1023 
##         29 
## pilota1024 
##         34 
## pilota1028 
##         34 
## pilota1029 
##         33 
## pilota1030 
##         22 
## pilota1031 
##         30 
## pilota1032 
##         30 
## pilotb0326 
##         37 
## pilotb0401 
##         22 
## pilotb0402 
##         22 
## pilotb0404 
##         18 
## pilotb0910 
##         29 
## pilotb0912 
##         39 
## pilotb0926 
##         28 
## pilotb1003 
##         30 
## pilotb1008 
##         43 
## pilotb1015 
##         36 
## pilotb1022 
##         25 
## pilotb1023 
##         23 
## pilotb1024 
##         26 
## pilotb1028 
##         24 
## pilotb1029 
##         41 
## pilotb1030 
##         28 
## pilotb1031 
##         36 
## pilotb1032 
##         21
#  How often each word was mis-categorised across participants
formattable(table(cat.ind.mismatches$Target))
## 
##   abuse 
##       1 
##    ache 
##      15 
##  afraid 
##       6 
##   alien 
##       8 
##   alley 
##      37 
##   alone 
##      31 
##   anger 
##       5 
##   annoy 
##      20 
##    army 
##      24 
## aroused 
##       7 
##    baby 
##      38 
##   beast 
##      41 
##    body 
##       7 
##   boxer 
##      29 
##   brave 
##       1 
##   bride 
##      16 
##  burial 
##      31 
##    cake 
##      12 
##  cancer 
##       3 
##   cheer 
##       4 
##  circus 
##      23 
##   cliff 
##      15 
##  clumsy 
##      31 
##  coffin 
##      11 
##    cold 
##      41 
## comfort 
##       2 
##  crisis 
##       5 
##    cute 
##       3 
##  danger 
##       8 
##   death 
##       1 
## defiant 
##      32 
## delight 
##       2 
## dentist 
##      24 
##  desire 
##       9 
##   devil 
##      21 
## devoted 
##      10 
## diamond 
##      17 
## divorce 
##       5 
##  doctor 
##      27 
## ecstasy 
##      10 
##  elated 
##       6 
## failure 
##       2 
##    fear 
##      10 
## freedom 
##       4 
##     fun 
##      26 
##    gift 
##       6 
##   grief 
##       1 
##    hard 
##      45 
##    hell 
##      13 
##    hide 
##      46 
## highway 
##      11 
##     hit 
##      55 
##  honest 
##       7 
## hopeful 
##       8 
##  horror 
##       8 
##   hotel 
##      15 
##     hug 
##       2 
##    hurt 
##       6 
##  injury 
##       7 
##  invest 
##      26 
##    jail 
##      12 
##   jewel 
##      25 
##     joy 
##       1 
##    kick 
##      31 
##    kind 
##       3 
##    kiss 
##      36 
##    lion 
##      22 
##  lonely 
##       6 
##   loyal 
##      37 
##    lump 
##      38 
##    lust 
##      21 
##  luxury 
##       9 
##     mad 
##       8 
##  misery 
##       1 
##  mystic 
##      27 
##    news 
##      17 
##   noisy 
##      37 
## obscene 
##      52 
##    pain 
##       2 
##  poison 
##       8 
##   pride 
##      13 
##   puppy 
##       5 
##    rage 
##       8 
##  rancid 
##      64 
## reptile 
##      18 
## respect 
##       7 
##  revolt 
##      43 
##  reward 
##       6 
##  robber 
##      14 
##  rotten 
##       9 
##   rough 
##      37 
##    rude 
##      12 
##     sad 
##       7 
##  salute 
##      25 
##  scared 
##       3 
##  secure 
##      11 
## selfish 
##      13 
## snuggle 
##       4 
##   storm 
##      39 
##   swamp 
##      22 
##  tamper 
##      34 
##    tank 
##      14 
##   tease 
##      30 
## tragedy 
##      34 
## traitor 
##       9 
##  trauma 
##       1 
##   treat 
##       6 
## triumph 
##       3 
##   truck 
##       7 
## trumpet 
##      70 
##   tumor 
##       3 
## unhappy 
##       5 
## vampire 
##      27 
##  vanity 
##      24 
##  victim 
##       2 
## victory 
##       2 
## volcano 
##      31 
## wealthy 
##      11
#  Distribution of participants' rated categories among the mismatches
formattable(table(cat.ind.mismatches$Rated.Category))
## 
## Negative 
##      897 
##  Neutral 
##      710 
## Positive 
##      396
#  Distribution of ANEW categories among the mismatches
formattable(table(cat.ind.mismatches$ANEW.Category))
## 
## Negative 
##      367 
##  Neutral 
##     1223 
## Positive 
##      413
#  ANEW-based word types among the mismatches
formattable(table(cat.ind.mismatches$Target.Word.Type))
## 
## Emotion-Label 
##           349 
## Emotion-Laden 
##           431 
##       Neutral 
##          1223
#  Rating-derived word types among the mismatches
formattable(table(cat.ind.mismatches$Rated.Word.Type))
## 
## Emotion-Label 
##            30 
## Emotion-Laden 
##          1263 
##       Neutral 
##           710
#  Cross-tab of individually rated category vs ANEW category.  Columns
#  referenced by name (was c(4, 5)) so the table survives upstream column
#  reordering; the rendered output below confirms these two columns.
formattable(table(cat.ind.mismatches[, c("Rated.Category", "ANEW.Category")]))
##               ANEW.Category
## Rated.Category Negative
##       Negative   0     
##       Neutral  357     
##       Positive  10     
##               ANEW.Category
## Rated.Category Neutral
##       Negative 837    
##       Neutral    0    
##       Positive 386    
##               ANEW.Category
## Rated.Category Positive
##       Negative  60     
##       Neutral  353     
##       Positive   0
#  Cross-tab of stimulus word type vs rating-derived word type.  Columns
#  referenced by name (was c(6, 8)) so the table survives upstream column
#  reordering; the rendered output below confirms these two columns.
formattable(table(cat.ind.mismatches[, c("Target.Word.Type", "Rated.Word.Type")]))
##                 Rated.Word.Type
## Target.Word.Type Emotion-Label
##    Emotion-Label   30         
##    Emotion-Laden    0         
##    Neutral          0         
##                 Rated.Word.Type
## Target.Word.Type Emotion-Laden
##    Emotion-Label    0         
##    Emotion-Laden   40         
##    Neutral       1223         
##                 Rated.Word.Type
## Target.Word.Type Neutral
##    Emotion-Label  319   
##    Emotion-Laden  391   
##    Neutral          0

10 Passing Data Through sl.pipe and Recoding Valence Categories

#  Main Data Frame
#  Baseline trial data: sl.merged (the merged SuperLab files, built earlier)
#  run through sl.pipe, which rebuilds the derived columns — presumably the
#  congruency/mismatch fields used below; confirm against sl.pipe's definition.
sl.trimmed <- sl.pipe(sl.merged)

#  get. functions for recoding
#  Look up the SAM-average valence category for word `x` in
#  sam.cat.comp.val (zero-length result when the word is absent).
get.val.cat.avg <- function(x) {
  hits <- sam.cat.comp.val$Target == x
  sam.cat.comp.val$Rated.Category[hits]
}
#  Look up the SAM-average word type for word `x` in sam.cat.comp.val
#  (zero-length result when the word is absent).
get.wt.avg <- function(x) {
  hits <- sam.cat.comp.val$Target == x
  sam.cat.comp.val$Rated.Word.Type[hits]
}
#  Look up the valence category participant `id` gave to word `x` in
#  sam.val.ind (zero-length result when no such row exists).
get.val.cat.ind <- function(x, id) {
  row.match <- sam.val.ind$Target == x & sam.val.ind$Session.ID == id
  sam.val.ind$Rated.Category[row.match]
}
#  Look up the word type participant `id`'s rating implies for word `x`
#  in sam.val.ind (zero-length result when no such row exists).
get.wt.ind <- function(x, id) {
  row.match <- sam.val.ind$Target == x & sam.val.ind$Session.ID == id
  sam.val.ind$Rated.Word.Type[row.match]
}

#  Recoding Based on SAM Averages
#  Overwrite each stimulus word's valence (.V) and word-type (.WT) columns
#  with the categories derived from the SAM average ratings, then rebuild
#  the derived columns via sl.pipe.
#  NOTE(review): sapply silently returns a list instead of a vector if any
#  lookup in get.val.cat.avg / get.wt.avg is not length 1 — confirm every
#  word has exactly one row in sam.cat.comp.val.
sl.recoded.avg <- sl.trimmed %>%
  mutate(., #  rewrite stimuli valences and wts
    Target.V = sapply(.$Target, FUN = get.val.cat.avg),
    Pair1.V = sapply(.$Pair1, FUN = get.val.cat.avg),
    Pair2.V = sapply(.$Pair2, FUN = get.val.cat.avg),
    Target.WT = sapply(.$Target, FUN = get.wt.avg),
    Pair1.WT = sapply(.$Pair1, FUN = get.wt.avg),
    Pair2.WT = sapply(.$Pair2, FUN = get.wt.avg)
  ) %>%
  sl.pipe(.)

#  Recoding Based on Individual Participant Ratings
#  Same recode as above, but each trial's word categories come from that
#  participant's own ratings (looked up by word AND Session.ID), then the
#  derived columns are rebuilt via sl.pipe.
#  NOTE(review): mapply simplifies to a vector only when every
#  (word, session) lookup returns exactly one value — confirm sam.val.ind
#  has one row per word per session.
sl.recoded.ind <- sl.trimmed %>%
  mutate(., #  rewrite stimuli valences and wts
    Target.V = mapply(get.val.cat.ind, .$Target, .$Session.ID),
    Pair1.V = mapply(get.val.cat.ind, .$Pair1, .$Session.ID),
    Pair2.V = mapply(get.val.cat.ind, .$Pair2, .$Session.ID),
    Target.WT = mapply(get.wt.ind, .$Target, .$Session.ID),
    Pair1.WT = mapply(get.wt.ind, .$Pair1, .$Session.ID),
    Pair2.WT = mapply(get.wt.ind, .$Pair2, .$Session.ID)
  ) %>%
  sl.pipe(.)

#  Removing Based on SAM Averages
#  Drop every trial whose target or either pair word was mis-categorised on
#  average (i.e. appears in cat.comp.mismatches), then rebuild the derived
#  columns via sl.pipe.
sl.removed.avg <- sl.trimmed %>%
  filter(
    !Target %in% cat.comp.mismatches$Target,
    !Pair1 %in% cat.comp.mismatches$Target,
    !Pair2 %in% cat.comp.mismatches$Target
  ) %>%
  sl.pipe()
nrow(sl.removed.avg)
## [1] 3251
#  Removing Based on Individual Participant Ratings
#  Keep trials according to how the participant's own rated category
#  compares with the ANEW category, word by word.
#  BUG FIX: the Pair1 and Pair2 comparisons previously looked up the rating
#  of .$Target instead of the pair words themselves (copy-paste error), so
#  the pair-word conditions never actually tested the pair words.
#  NOTE(review): `!=` KEEPS trials whose individual rating disagrees with
#  ANEW — given the section title says "Removing", confirm the comparison
#  direction is intended (the SAM-average version above removes mismatches).
sl.removed.ind <- sl.trimmed %>%
  filter(
    .,
    get.val(.$Target) != mapply(get.val.cat.ind, .$Target, .$Session.ID),
    get.val(.$Pair1) != mapply(get.val.cat.ind, .$Pair1, .$Session.ID),
    get.val(.$Pair2) != mapply(get.val.cat.ind, .$Pair2, .$Session.ID)
  ) %>%
  sl.pipe(.)
nrow(sl.removed.ind)
## [1] 768

11 Factorize & Aggregate For Repeated Measures Mixed ANOVAs

#  Factorize, aggregate, and run two repeated-measures mixed ANOVAs
#  (jmv::anovaRM) on a trial-level SuperLab data frame:
#    1. rm.simple — 2 (Word Type) x 2 (Primed/Satiated) within subjects,
#       Participant.Group between, with full post hocs.
#    2. rm.bdi    — same within-subjects design with bdi added as a second
#       between-subjects factor.
#  Returns list(rm.simple, rm.bdi).  Side effect: writes
#  "sl.agg.rm.spss.csv" (wide-format cell means for SPSS cross-checking).
rmaov.pipe <- function(df) {
  #  Character -> factor, then factor -> numeric codes via fac.to.num
  #  (defined elsewhere in the file; level coding assumed stable).
  sl.factorized <- df %>%
    mutate_if(sapply(., is.character), as.factor) %>%
    mutate_if(sapply(., is.factor), fac.to.num)
  #  Full per-cell descriptives (n, mean, median, sd, min, max).
  #  NOTE(review): sl.agg.desc is computed but never used below — confirm
  #  whether it should be returned or dropped.
  sl.agg.desc <- sl.factorized %>%
    dplyr::select(., Session.ID, Participant.Group, X.Primed.Satiated, Target.V, Target.WT, TPRValence, bdi, Error.Code, Reaction.Time) %>%
    group_by(., Session.ID, Participant.Group, X.Primed.Satiated, Target.V, TPRValence, Target.WT, bdi, Error.Code) %>%
    summarise_all(list(length = length, mean = mean, median = median, sd = sd, min = min, max = max)) %>%
    rename(RTCMean = "mean", n = "length", Congruency = TPRValence)
  #  Per-cell mean reaction time only (everything except Reaction.Time is a
  #  grouping variable, so summarise_all touches Reaction.Time alone).
  sl.agg <- sl.factorized %>%
    dplyr::select(., Session.ID, Participant.Group, X.Primed.Satiated, Target.V, Target.WT, TPRValence, bdi, Error.Code, Reaction.Time) %>%
    group_by(., Session.ID, Participant.Group, X.Primed.Satiated, Target.V, TPRValence, Target.WT, bdi, Error.Code) %>%
    summarise_all(list(mean = mean)) %>%
    rename(RTCMean = mean, Congruency = TPRValence)

  #  Cell means restricted to the codes used by the ANOVA: Error.Code == 1
  #  and Target.WT / Target.V != 3 (presumably correct trials and the
  #  non-neutral levels after fac.to.num — confirm the coding).  The merge
  #  re-attaches Participant.Group from sl.agg (suffixing .x/.y), and the
  #  final mutate collapses its four levels into two (1,2 -> 1; else -> 2).
  sl.agg.rm <- sl.factorized %>%
    dplyr::select(Session.ID, Participant.Group, X.Primed.Satiated, Target.V, Target.WT, TPRValence, bdi, Error.Code, Reaction.Time) %>%
    group_by(Session.ID, Participant.Group, X.Primed.Satiated, Target.V, TPRValence, Target.WT, bdi, Error.Code) %>%
    summarise_all(list(mean = mean)) %>%
    filter(Error.Code == 1, Target.WT != 3, Target.V != 3) %>%
    merge(., c(distinct(sl.agg[, 1:2])), by = "Session.ID") %>%
    dplyr::select(., -Participant.Group.y) %>%
    rename(RTCMean = mean, Congruency = TPRValence, Participant.Group = Participant.Group.x) %>%
    mutate(Participant.Group = ifelse(.$Participant.Group %in% c(1, 2), 1, 2))

  #  Wide format: one column per Target.WT x X.Primed.Satiated cell,
  #  matching the "1_1" ... "2_2" measure names wired into rmCells below.
  sl.agg.rm.spss <- sl.agg.rm %>%
    recast(.,
      formula = Session.ID ~ Target.WT + X.Primed.Satiated,
      id.var = c("Session.ID", "Target.WT", "X.Primed.Satiated"),
      measure.var = "RTCMean",
      fun = mean
    ) %>%
    merge(., c(distinct(sl.agg[, c(1:2, 7)])), by = "Session.ID") %>%
    mutate(Participant.Group = ifelse(.$Participant.Group %in% c(1, 2), 1, 2))
  write.csv(sl.agg.rm.spss, "sl.agg.rm.spss.csv")

  #  2x2 within + group between, all post hocs, Tukey & Scheffe corrections.
    rm.simple <- jmv::anovaRM(
    data = sl.agg.rm.spss,
    rm = list(
        list(
            label="Word Type",
            levels=c("Emotion-label", "Emotion-laden")),
        list(
            label="Primed/Satiated",
            levels=c("Primed", "Satiated"))),
    rmCells = list(
        list(
            measure="1_1",
            cell=c("Emotion-label", "Primed")),
        list(
            measure="1_2",
            cell=c("Emotion-label", "Satiated")),
        list(
            measure="2_1",
            cell=c("Emotion-laden", "Primed")),
        list(
            measure="2_2",
            cell=c("Emotion-laden", "Satiated"))),
    bs = Participant.Group,
    effectSize = c("ges", "eta", "partEta"),
    depLabel = "RT",
    rmTerms = ~ `Word Type` + `Primed/Satiated` + `Word Type`:`Primed/Satiated`,
    bsTerms = ~ Participant.Group,
    spherTests = TRUE,
    leveneTest = TRUE,
    postHoc = list(
        "Word Type",
        "Primed/Satiated",
        c("Word Type", "Primed/Satiated"),
        "Participant.Group",
        c("Word Type", "Participant.Group"),
        c(
            "Primed/Satiated",
            "Participant.Group"),
        c(
            "Word Type",
            "Primed/Satiated",
            "Participant.Group")),
    postHocCorr = c("tukey","scheffe"),
    emmPlots = FALSE,
    emmWeights = FALSE,
    groupSumm = TRUE)

  #  Same design with bdi as an extra between-subjects factor; no post hocs.
  rm.bdi <- jmv::anovaRM(
    data = sl.agg.rm.spss,
    rm = list(
        list(
            label="Word Type",
            levels=c("Emotion-label", "Emotion-laden")),
        list(
            label="Primed/Satiated",
            levels=c("Primed", "Satiated"))),
    rmCells = list(
        list(
            measure="1_1",
            cell=c("Emotion-label", "Primed")),
        list(
            measure="1_2",
            cell=c("Emotion-label", "Satiated")),
        list(
            measure="2_1",
            cell=c("Emotion-laden", "Primed")),
        list(
            measure="2_2",
            cell=c("Emotion-laden", "Satiated"))),
    bs = vars(Participant.Group, bdi),
    effectSize = c("ges", "eta", "partEta"),
    depLabel = "RT",
    rmTerms = ~ `Word Type` + `Primed/Satiated` + `Word Type`:`Primed/Satiated`,
    bsTerms = ~ Participant.Group + bdi + Participant.Group:bdi,
    spherTests = TRUE,
    leveneTest = TRUE,
    postHoc = list(),
    postHocCorr = NULL,
    emmPlots = FALSE,
    emmWeights = FALSE,
    groupSumm = TRUE
    )
  return(list(
    rm.simple,
    rm.bdi
  ))
  }

#rmaov.pipe(sl.trimmed)
#rmaov.pipe(sl.recoded.avg)
#rmaov.pipe(sl.recoded.ind)
#rmaov.pipe(sl.removed.avg)
#rmaov.pipe(sl.removed.ind)

12 Complete SuperLab Descriptives

#  Proportion tables for every categorical column of interest, plus
#  psych::describe() summaries of the continuous measures.  Returns the
#  same unnamed 32-element list as before (31 prop tables in the original
#  order, then the describe() data frame).
desc.pipe <- function(df) {
  freq.cols <- c(
    "Participant.Group", "Correct.Response", "Error.Code",
    "X.Primed.Satiated", "Target.V", "Pair1.V", "Pair2.V",
    "Target.WT", "Pair1.WT", "Pair2.WT", "bdi", "stai.y1", "stai.y2",
    "mood.state.2", "mood.state.2.y1", "mood.state.2.y2",
    "mood.state.4.y1", "mood.state.4.y2", "mood.state.4", "mood.state.8",
    "Age", "Gender", "Handedness", "Hearing.Status", "Pair.MM", "Pair.WT",
    "TPRValence", "TPRWT", "Valence.Congruency", "Word.Type.Congruency",
    "Primed.Satiated.MM"
  )
  props <- lapply(freq.cols, function(nm) prop.table(table(df[[nm]])))
  c(
    props,
    list(describe(select(df, Reaction.Time, bdi.tot, stai.y1.tot, stai.y2.tot, Age)))
  )
}
#  Descriptives for the untouched (ANEW-coded) data
desc.pipe(sl.trimmed)
## [[1]]
## 
##       n_a 
## 0.2493126 
##       n_b 
## 0.2637947 
##    pilota 
## 0.2507791 
##    pilotb 
## 0.2361137 
## 
## [[2]]
## 
##     Match 
## 0.5191567 
##  Mismatch 
## 0.4808433 
## 
## [[3]]
## 
##          C 
## 0.71384051 
##          E 
## 0.26801100 
##         NR 
## 0.01814849 
## 
## [[4]]
## 
##   Primed 
## 0.500275 
## Satiated 
## 0.499725 
## 
## [[5]]
## 
##  Negative 
## 0.3847846 
##   Neutral 
## 0.2566453 
##  Positive 
## 0.3585701 
## 
## [[6]]
## 
##  Negative 
## 0.3270394 
##   Neutral 
## 0.3846013 
##  Positive 
## 0.2883593 
## 
## [[7]]
## 
##  Negative 
## 0.3912007 
##   Neutral 
## 0.3272227 
##  Positive 
## 0.2815765 
## 
## [[8]]
## 
## Emotion-Label 
##     0.3715857 
## Emotion-Laden 
##     0.3717690 
##       Neutral 
##     0.2566453 
## 
## [[9]]
## 
## Emotion-Label 
##     0.2887259 
## Emotion-Laden 
##     0.3266728 
##       Neutral 
##     0.3846013 
## 
## [[10]]
## 
## Emotion-Label 
##     0.3268561 
## Emotion-Laden 
##     0.3459212 
##       Neutral 
##     0.3272227 
## 
## [[11]]
## 
## Non-Scoring 
##   0.6736939 
##     Scoring 
##   0.3263061 
## 
## [[12]]
## 
## Non-Scoring 
##   0.7587534 
##     Scoring 
##   0.2412466 
## 
## [[13]]
## 
## Non-Scoring 
##   0.5871677 
##     Scoring 
##   0.4128323 
## 
## [[14]]
## 
## Non-Scoring 
##   0.4295142 
##     Scoring 
##   0.5704858 
## 
## [[15]]
## 
## Non-Scoring 
##   0.5593034 
##     Scoring 
##   0.4406966 
## 
## [[16]]
## 
## Non-Scoring 
##   0.5013749 
##     Scoring 
##   0.4986251 
## 
## [[17]]
## 
##  BDI & STAI 
##   0.1268561 
##    BDI Only 
##   0.1994500 
## Non-Scoring 
##   0.5593034 
##   STAI Only 
##   0.1143905 
## 
## [[18]]
## 
##  BDI & STAI 
##  0.24051329 
##    BDI Only 
##  0.08579285 
## Non-Scoring 
##  0.50137489 
##   STAI Only 
##  0.17231897 
## 
## [[19]]
## 
##  BDI & STAI 
##  0.24051329 
##    BDI Only 
##  0.08579285 
## Non-Scoring 
##  0.42951421 
##   STAI Only 
##  0.24417965 
## 
## [[20]]
## 
## BDI & STAI Y1 & STAI Y2 
##              0.12685610 
##           BDI & STAI Y2 
##              0.11365720 
##                BDI Only 
##              0.08579285 
##             Non-Scoring 
##              0.42951421 
##       STAI Y1 & STAI Y2 
##              0.04252979 
##            STAI Y1 Only 
##              0.07186068 
##            STAI Y2 Only 
##              0.12978918 
## 
## [[21]]
## 
##         18 
## 0.43189734 
##         19 
## 0.24326306 
##         20 
## 0.11439047 
##         21 
## 0.11145738 
##         22 
## 0.07039413 
##         23 
## 0.01393217 
##         25 
## 0.01466544 
## 
## [[22]]
## 
##     Female 
## 0.59816682 
##       Male 
## 0.38790101 
## Non-Binary 
## 0.01393217 
## 
## [[23]]
## 
##  Left-handed 
##   0.09899175 
## Right-handed 
##   0.90100825 
## 
## [[24]]
## 
## Hearing 
##       1 
## 
## [[25]]
## 
##     Match 
## 0.5191567 
##  Mismatch 
## 0.4808433 
## 
## [[26]]
## 
## Emotion-Label 
##    0.15398717 
## Emotion-Laden 
##    0.14060495 
##    Mixed E.EL 
##    0.05132906 
##   Mixed E.NEU 
##    0.08340972 
##    Mixed EL.E 
##    0.08340972 
##  Mixed EL.NEU 
##    0.10265811 
##   Mixed NEU.E 
##    0.08945921 
##  Mixed NEU.EL 
##    0.15398717 
##       Neutral 
##    0.14115490 
## 
## [[27]]
## 
##        Congruent 
##        0.3266728 
##      Incongruent 
##        0.2438130 
## Pair 1 Congruent 
##        0.1860678 
## Pair 2 Congruent 
##        0.2434464 
## 
## [[28]]
## 
##        Congruent 
##        0.1153071 
##      Incongruent 
##        0.5769019 
## Pair 1 Congruent 
##        0.1475710 
## Pair 2 Congruent 
##        0.1602200 
## 
## [[29]]
## 
##        Negative.Congruent 
##                0.17946838 
##      Negative.Incongruent 
##                0.03849679 
## Negative.Pair 1 Congruent 
##                0.05774519 
## Negative.Pair 2 Congruent 
##                0.10907424 
##         Neutral.Congruent 
##                0.03849679 
##       Neutral.Incongruent 
##                0.14115490 
##  Neutral.Pair 1 Congruent 
##                0.03849679 
##  Neutral.Pair 2 Congruent 
##                0.03849679 
##        Positive.Congruent 
##                0.10870761 
##      Positive.Incongruent 
##                0.06416132 
## Positive.Pair 1 Congruent 
##                0.08982585 
## Positive.Pair 2 Congruent 
##                0.09587534 
## 
## [[30]]
## 
##        Emotion-Label.Congruent 
##                     0.03849679 
##      Emotion-Label.Incongruent 
##                     0.22419798 
## Emotion-Label.Pair 1 Congruent 
##                     0.05132906 
## Emotion-Label.Pair 2 Congruent 
##                     0.05756187 
##        Emotion-Laden.Congruent 
##                     0.03831347 
##      Emotion-Laden.Incongruent 
##                     0.21154904 
## Emotion-Laden.Pair 1 Congruent 
##                     0.05774519 
## Emotion-Laden.Pair 2 Congruent 
##                     0.06416132 
##              Neutral.Congruent 
##                     0.03849679 
##            Neutral.Incongruent 
##                     0.14115490 
##       Neutral.Pair 1 Congruent 
##                     0.03849679 
##       Neutral.Pair 2 Congruent 
##                     0.03849679 
## 
## [[31]]
## 
##      Primed.Match 
##         0.2502291 
##   Primed.Mismatch 
##         0.2500458 
##    Satiated.Match 
##         0.2689276 
## Satiated.Mismatch 
##         0.2307974 
## 
## [[32]]
##               vars
## Reaction.Time    1
## bdi.tot          2
## stai.y1.tot      3
## stai.y2.tot      4
## Age              5
##                  n
## Reaction.Time 5356
## bdi.tot       5455
## stai.y1.tot   5455
## stai.y2.tot   5455
## Age           5455
##                  mean
## Reaction.Time 1442.91
## bdi.tot         10.58
## stai.y1.tot     38.28
## stai.y2.tot     41.77
## Age             19.26
##                   sd
## Reaction.Time 471.22
## bdi.tot         8.39
## stai.y1.tot     9.04
## stai.y2.tot    10.18
## Age             1.52
##               median
## Reaction.Time 1361.5
## bdi.tot          8.0
## stai.y1.tot     37.0
## stai.y2.tot     40.0
## Age             19.0
##               trimmed
## Reaction.Time 1401.17
## bdi.tot          9.46
## stai.y1.tot     37.18
## stai.y2.tot     41.23
## Age             19.00
##                  mad
## Reaction.Time 457.38
## bdi.tot         7.41
## stai.y1.tot     5.93
## stai.y2.tot    10.38
## Age             1.48
##               min
## Reaction.Time 539
## bdi.tot         0
## stai.y1.tot    24
## stai.y2.tot    22
## Age            18
##                max
## Reaction.Time 2999
## bdi.tot         38
## stai.y1.tot     62
## stai.y2.tot     67
## Age             25
##               range
## Reaction.Time  2460
## bdi.tot          38
## stai.y1.tot      38
## stai.y2.tot      45
## Age               7
##               skew
## Reaction.Time 0.77
## bdi.tot       1.18
## stai.y1.tot   0.94
## stai.y2.tot   0.50
## Age           1.34
##               kurtosis
## Reaction.Time     0.21
## bdi.tot           1.11
## stai.y1.tot       0.34
## stai.y2.tot      -0.46
## Age               1.64
##                 se
## Reaction.Time 6.44
## bdi.tot       0.11
## stai.y1.tot   0.12
## stai.y2.tot   0.14
## Age           0.02
#  Descriptives after recoding to the SAM average ratings
desc.pipe(sl.recoded.avg)
## [[1]]
## 
##       n_a 
## 0.2493126 
##       n_b 
## 0.2637947 
##    pilota 
## 0.2507791 
##    pilotb 
## 0.2361137 
## 
## [[2]]
## 
##     Match 
## 0.4551787 
##  Mismatch 
## 0.5448213 
## 
## [[3]]
## 
##          C 
## 0.72318973 
##          E 
## 0.25866178 
##         NR 
## 0.01814849 
## 
## [[4]]
## 
##   Primed 
## 0.500275 
## Satiated 
## 0.499725 
## 
## [[5]]
## 
##  Negative 
## 0.4617782 
##   Neutral 
## 0.2117324 
##  Positive 
## 0.3264895 
## 
## [[6]]
## 
##  Negative 
## 0.4168653 
##   Neutral 
## 0.3204400 
##  Positive 
## 0.2626948 
## 
## [[7]]
## 
##  Negative 
## 0.5389551 
##   Neutral 
## 0.2179652 
##  Positive 
## 0.2430797 
## 
## [[8]]
## 
## Emotion-Label 
##     0.3587534 
## Emotion-Laden 
##     0.4295142 
##       Neutral 
##     0.2117324 
## 
## [[9]]
## 
## Emotion-Label 
##     0.2758937 
## Emotion-Laden 
##     0.4036664 
##       Neutral 
##     0.3204400 
## 
## [[10]]
## 
## Emotion-Label 
##     0.3204400 
## Emotion-Laden 
##     0.4615949 
##       Neutral 
##     0.2179652 
## 
## [[11]]
## 
## Non-Scoring 
##   0.6736939 
##     Scoring 
##   0.3263061 
## 
## [[12]]
## 
## Non-Scoring 
##   0.7587534 
##     Scoring 
##   0.2412466 
## 
## [[13]]
## 
## Non-Scoring 
##   0.5871677 
##     Scoring 
##   0.4128323 
## 
## [[14]]
## 
## Non-Scoring 
##   0.4295142 
##     Scoring 
##   0.5704858 
## 
## [[15]]
## 
## Non-Scoring 
##   0.5593034 
##     Scoring 
##   0.4406966 
## 
## [[16]]
## 
## Non-Scoring 
##   0.5013749 
##     Scoring 
##   0.4986251 
## 
## [[17]]
## 
##  BDI & STAI 
##   0.1268561 
##    BDI Only 
##   0.1994500 
## Non-Scoring 
##   0.5593034 
##   STAI Only 
##   0.1143905 
## 
## [[18]]
## 
##  BDI & STAI 
##  0.24051329 
##    BDI Only 
##  0.08579285 
## Non-Scoring 
##  0.50137489 
##   STAI Only 
##  0.17231897 
## 
## [[19]]
## 
##  BDI & STAI 
##  0.24051329 
##    BDI Only 
##  0.08579285 
## Non-Scoring 
##  0.42951421 
##   STAI Only 
##  0.24417965 
## 
## [[20]]
## 
## BDI & STAI Y1 & STAI Y2 
##              0.12685610 
##           BDI & STAI Y2 
##              0.11365720 
##                BDI Only 
##              0.08579285 
##             Non-Scoring 
##              0.42951421 
##       STAI Y1 & STAI Y2 
##              0.04252979 
##            STAI Y1 Only 
##              0.07186068 
##            STAI Y2 Only 
##              0.12978918 
## 
## [[21]]
## 
##         18 
## 0.43189734 
##         19 
## 0.24326306 
##         20 
## 0.11439047 
##         21 
## 0.11145738 
##         22 
## 0.07039413 
##         23 
## 0.01393217 
##         25 
## 0.01466544 
## 
## [[22]]
## 
##     Female 
## 0.59816682 
##       Male 
## 0.38790101 
## Non-Binary 
## 0.01393217 
## 
## [[23]]
## 
##  Left-handed 
##   0.09899175 
## Right-handed 
##   0.90100825 
## 
## [[24]]
## 
## Hearing 
##       1 
## 
## [[25]]
## 
##     Match 
## 0.4551787 
##  Mismatch 
## 0.5448213 
## 
## [[26]]
## 
## Emotion-Label 
##    0.13473877 
## Emotion-Laden 
##    0.17286893 
##    Mixed E.EL 
##    0.08982585 
##   Mixed E.NEU 
##    0.05132906 
##    Mixed EL.E 
##    0.10907424 
##  Mixed EL.NEU 
##    0.12172319 
##   Mixed NEU.E 
##    0.07662695 
##  Mixed NEU.EL 
##    0.19890009 
##       Neutral 
##    0.04491292 
## 
## [[27]]
## 
##        Congruent 
##        0.3140238 
##      Incongruent 
##        0.2823098 
## Pair 1 Congruent 
##        0.1987168 
## Pair 2 Congruent 
##        0.2049496 
## 
## [[28]]
## 
##        Congruent 
##        0.1024748 
##      Incongruent 
##        0.4678277 
## Pair 1 Congruent 
##        0.2117324 
## Pair 2 Congruent 
##        0.2179652 
## 
## [[29]]
## 
##        Negative.Congruent 
##                0.20531622 
##      Negative.Incongruent 
##                0.07057745 
## Negative.Pair 1 Congruent 
##                0.07681027 
## Negative.Pair 2 Congruent 
##                0.10907424 
##         Neutral.Congruent 
##                0.01924840 
##       Neutral.Incongruent 
##                0.13473877 
##  Neutral.Pair 1 Congruent 
##                0.03849679 
##  Neutral.Pair 2 Congruent 
##                0.01924840 
##        Positive.Congruent 
##                0.08945921 
##      Positive.Incongruent 
##                0.07699358 
## Positive.Pair 1 Congruent 
##                0.08340972 
## Positive.Pair 2 Congruent 
##                0.07662695 
## 
## [[30]]
## 
##        Emotion-Label.Congruent 
##                     0.03849679 
##      Emotion-Label.Incongruent 
##                     0.21136572 
## Emotion-Label.Pair 1 Congruent 
##                     0.05132906 
## Emotion-Label.Pair 2 Congruent 
##                     0.05756187 
##        Emotion-Laden.Congruent 
##                     0.04472961 
##      Emotion-Laden.Incongruent 
##                     0.12172319 
## Emotion-Laden.Pair 1 Congruent 
##                     0.12190651 
## Emotion-Laden.Pair 2 Congruent 
##                     0.14115490 
##              Neutral.Congruent 
##                     0.01924840 
##            Neutral.Incongruent 
##                     0.13473877 
##       Neutral.Pair 1 Congruent 
##                     0.03849679 
##       Neutral.Pair 2 Congruent 
##                     0.01924840 
## 
## [[31]]
## 
##      Primed.Match 
##         0.2245646 
##   Primed.Mismatch 
##         0.2757104 
##    Satiated.Match 
##         0.2306141 
## Satiated.Mismatch 
##         0.2691109 
## 
## [[32]]
##               vars
## Reaction.Time    1
## bdi.tot          2
## stai.y1.tot      3
## stai.y2.tot      4
## Age              5
##                  n
## Reaction.Time 5356
## bdi.tot       5455
## stai.y1.tot   5455
## stai.y2.tot   5455
## Age           5455
##                  mean
## Reaction.Time 1442.91
## bdi.tot         10.58
## stai.y1.tot     38.28
## stai.y2.tot     41.77
## Age             19.26
##                   sd
## Reaction.Time 471.22
## bdi.tot         8.39
## stai.y1.tot     9.04
## stai.y2.tot    10.18
## Age             1.52
##               median
## Reaction.Time 1361.5
## bdi.tot          8.0
## stai.y1.tot     37.0
## stai.y2.tot     40.0
## Age             19.0
##               trimmed
## Reaction.Time 1401.17
## bdi.tot          9.46
## stai.y1.tot     37.18
## stai.y2.tot     41.23
## Age             19.00
##                  mad
## Reaction.Time 457.38
## bdi.tot         7.41
## stai.y1.tot     5.93
## stai.y2.tot    10.38
## Age             1.48
##               min
## Reaction.Time 539
## bdi.tot         0
## stai.y1.tot    24
## stai.y2.tot    22
## Age            18
##                max
## Reaction.Time 2999
## bdi.tot         38
## stai.y1.tot     62
## stai.y2.tot     67
## Age             25
##               range
## Reaction.Time  2460
## bdi.tot          38
## stai.y1.tot      38
## stai.y2.tot      45
## Age               7
##               skew
## Reaction.Time 0.77
## bdi.tot       1.18
## stai.y1.tot   0.94
## stai.y2.tot   0.50
## Age           1.34
##               kurtosis
## Reaction.Time     0.21
## bdi.tot           1.11
## stai.y1.tot       0.34
## stai.y2.tot      -0.46
## Age               1.64
##                 se
## Reaction.Time 6.44
## bdi.tot       0.11
## stai.y1.tot   0.12
## stai.y2.tot   0.14
## Age           0.02
#  Descriptive summaries: individually-recoded data set
desc.pipe(sl.recoded.ind)
## [[1]]
## 
##       n_a 
## 0.2493126 
##       n_b 
## 0.2637947 
##    pilota 
## 0.2507791 
##    pilotb 
## 0.2361137 
## 
## [[2]]
## 
##     Match 
## 0.4707627 
##  Mismatch 
## 0.5292373 
## 
## [[3]]
## 
##          C 
## 0.63220339 
##          E 
## 0.35021186 
##         NR 
## 0.01758475 
## 
## [[4]]
## 
##   Primed 
## 0.500275 
## Satiated 
## 0.499725 
## 
## [[5]]
## 
##  Negative 
## 0.3712191 
##   Neutral 
## 0.3140238 
##  Positive 
## 0.3147571 
## 
## [[6]]
## 
##  Negative 
## 0.3448137 
##   Neutral 
## 0.4032226 
##  Positive 
## 0.2519637 
## 
## [[7]]
## 
##  Negative 
## 0.4010070 
##   Neutral 
## 0.3659617 
##  Positive 
## 0.2330312 
## 
## [[8]]
## 
## Emotion-Label 
##     0.2929423 
## Emotion-Laden 
##     0.3930339 
##       Neutral 
##     0.3140238 
## 
## [[9]]
## 
## Emotion-Label 
##     0.2317140 
## Emotion-Laden 
##     0.4012832 
##       Neutral 
##     0.3670027 
## 
## [[10]]
## 
## Emotion-Label 
##     0.2703941 
## Emotion-Laden 
##     0.3965170 
##       Neutral 
##     0.3330889 
## 
## [[11]]
## 
## Non-Scoring 
##   0.6736939 
##     Scoring 
##   0.3263061 
## 
## [[12]]
## 
## Non-Scoring 
##   0.7587534 
##     Scoring 
##   0.2412466 
## 
## [[13]]
## 
## Non-Scoring 
##   0.5871677 
##     Scoring 
##   0.4128323 
## 
## [[14]]
## 
## Non-Scoring 
##   0.4295142 
##     Scoring 
##   0.5704858 
## 
## [[15]]
## 
## Non-Scoring 
##   0.5593034 
##     Scoring 
##   0.4406966 
## 
## [[16]]
## 
## Non-Scoring 
##   0.5013749 
##     Scoring 
##   0.4986251 
## 
## [[17]]
## 
##  BDI & STAI 
##   0.1268561 
##    BDI Only 
##   0.1994500 
## Non-Scoring 
##   0.5593034 
##   STAI Only 
##   0.1143905 
## 
## [[18]]
## 
##  BDI & STAI 
##  0.24051329 
##    BDI Only 
##  0.08579285 
## Non-Scoring 
##  0.50137489 
##   STAI Only 
##  0.17231897 
## 
## [[19]]
## 
##  BDI & STAI 
##  0.24051329 
##    BDI Only 
##  0.08579285 
## Non-Scoring 
##  0.42951421 
##   STAI Only 
##  0.24417965 
## 
## [[20]]
## 
## BDI & STAI Y1 & STAI Y2 
##              0.12685610 
##           BDI & STAI Y2 
##              0.11365720 
##                BDI Only 
##              0.08579285 
##             Non-Scoring 
##              0.42951421 
##       STAI Y1 & STAI Y2 
##              0.04252979 
##            STAI Y1 Only 
##              0.07186068 
##            STAI Y2 Only 
##              0.12978918 
## 
## [[21]]
## 
##         18 
## 0.43189734 
##         19 
## 0.24326306 
##         20 
## 0.11439047 
##         21 
## 0.11145738 
##         22 
## 0.07039413 
##         23 
## 0.01393217 
##         25 
## 0.01466544 
## 
## [[22]]
## 
##     Female 
## 0.59816682 
##       Male 
## 0.38790101 
## Non-Binary 
## 0.01393217 
## 
## [[23]]
## 
##  Left-handed 
##   0.09899175 
## Right-handed 
##   0.90100825 
## 
## [[24]]
## 
## Hearing 
##       1 
## 
## [[25]]
## 
##     Match 
## 0.4707627 
##  Mismatch 
## 0.5292373 
## 
## [[26]]
## 
## Emotion-Label 
##    0.09770852 
## Emotion-Laden 
##    0.17855179 
##    Mixed E.EL 
##    0.06746104 
##   Mixed E.NEU 
##    0.06654445 
##    Mixed EL.E 
##    0.09532539 
##  Mixed EL.NEU 
##    0.12740605 
##   Mixed NEU.E 
##    0.07736022 
##  Mixed NEU.EL 
##    0.15050412 
##       Neutral 
##    0.13913841 
## 
## [[27]]
## 
##        Congruent 
##        0.2730932 
##      Incongruent 
##        0.2747881 
## Pair 1 Congruent 
##        0.2273305 
## Pair 2 Congruent 
##        0.2247881 
## 
## [[28]]
## 
##        Congruent 
##        0.1461045 
##      Incongruent 
##        0.4573786 
## Pair 1 Congruent 
##        0.2082493 
## Pair 2 Congruent 
##        0.1882676 
## 
## [[29]]
## 
##        Negative.Congruent 
##                0.10669111 
##      Negative.Incongruent 
##                0.08230981 
##               Negative.NA 
##                0.03006416 
## Negative.Pair 1 Congruent 
##                0.07112741 
## Negative.Pair 2 Congruent 
##                0.08102658 
##         Neutral.Congruent 
##                0.05242896 
##       Neutral.Incongruent 
##                0.07717690 
##                Neutral.NA 
##                0.07424381 
##  Neutral.Pair 1 Congruent 
##                0.06122823 
##  Neutral.Pair 2 Congruent 
##                0.04894592 
##        Positive.Congruent 
##                0.07717690 
##      Positive.Incongruent 
##                0.07827681 
##               Positive.NA 
##                0.03043080 
## Positive.Pair 1 Congruent 
##                0.06434464 
## Positive.Pair 2 Congruent 
##                0.06452796 
## 
## [[30]]
## 
##        Emotion-Label.Congruent 
##                     0.02584785 
##      Emotion-Label.Incongruent 
##                     0.18130156 
## Emotion-Label.Pair 1 Congruent 
##                     0.03556370 
## Emotion-Label.Pair 2 Congruent 
##                     0.05022915 
##        Emotion-Laden.Congruent 
##                     0.06782768 
##      Emotion-Laden.Incongruent 
##                     0.14555454 
## Emotion-Laden.Pair 1 Congruent 
##                     0.09935839 
## Emotion-Laden.Pair 2 Congruent 
##                     0.08029331 
##              Neutral.Congruent 
##                     0.05242896 
##            Neutral.Incongruent 
##                     0.13052246 
##       Neutral.Pair 1 Congruent 
##                     0.07332722 
##       Neutral.Pair 2 Congruent 
##                     0.05774519 
## 
## [[31]]
## 
##      Primed.Match 
##        0.20238313 
##   Primed.Mismatch 
##        0.22089826 
##         Primed.NA 
##        0.07699358 
##    Satiated.Match 
##        0.20494959 
## Satiated.Mismatch 
##        0.23703025 
##       Satiated.NA 
##        0.05774519 
## 
## [[32]]
##               vars
## Reaction.Time    1
## bdi.tot          2
## stai.y1.tot      3
## stai.y2.tot      4
## Age              5
##                  n
## Reaction.Time 5356
## bdi.tot       5455
## stai.y1.tot   5455
## stai.y2.tot   5455
## Age           5455
##                  mean
## Reaction.Time 1442.91
## bdi.tot         10.58
## stai.y1.tot     38.28
## stai.y2.tot     41.77
## Age             19.26
##                   sd
## Reaction.Time 471.22
## bdi.tot         8.39
## stai.y1.tot     9.04
## stai.y2.tot    10.18
## Age             1.52
##               median
## Reaction.Time 1361.5
## bdi.tot          8.0
## stai.y1.tot     37.0
## stai.y2.tot     40.0
## Age             19.0
##               trimmed
## Reaction.Time 1401.17
## bdi.tot          9.46
## stai.y1.tot     37.18
## stai.y2.tot     41.23
## Age             19.00
##                  mad
## Reaction.Time 457.38
## bdi.tot         7.41
## stai.y1.tot     5.93
## stai.y2.tot    10.38
## Age             1.48
##               min
## Reaction.Time 539
## bdi.tot         0
## stai.y1.tot    24
## stai.y2.tot    22
## Age            18
##                max
## Reaction.Time 2999
## bdi.tot         38
## stai.y1.tot     62
## stai.y2.tot     67
## Age             25
##               range
## Reaction.Time  2460
## bdi.tot          38
## stai.y1.tot      38
## stai.y2.tot      45
## Age               7
##               skew
## Reaction.Time 0.77
## bdi.tot       1.18
## stai.y1.tot   0.94
## stai.y2.tot   0.50
## Age           1.34
##               kurtosis
## Reaction.Time     0.21
## bdi.tot           1.11
## stai.y1.tot       0.34
## stai.y2.tot      -0.46
## Age               1.64
##                 se
## Reaction.Time 6.44
## bdi.tot       0.11
## stai.y1.tot   0.12
## stai.y2.tot   0.14
## Age           0.02
#  Descriptive summaries: removed-outlier, averaged data set
desc.pipe(sl.removed.avg)
## [[1]]
## 
##       n_a 
## 0.2666872 
##       n_b 
## 0.2823747 
##    pilota 
## 0.2325438 
##    pilotb 
## 0.2183943 
## 
## [[2]]
## 
##    Match 
## 0.494617 
## Mismatch 
## 0.505383 
## 
## [[3]]
## 
##          C 
## 0.82282375 
##          E 
## 0.16210397 
##         NR 
## 0.01507229 
## 
## [[4]]
## 
##    Primed 
## 0.4733928 
##  Satiated 
## 0.5266072 
## 
## [[5]]
## 
##  Negative 
## 0.4629345 
##   Neutral 
## 0.1722547 
##  Positive 
## 0.3648108 
## 
## [[6]]
## 
##  Negative 
## 0.3660412 
##   Neutral 
## 0.2900646 
##  Positive 
## 0.3438942 
## 
## [[7]]
## 
##  Negative 
## 0.4952322 
##   Neutral 
## 0.2153184 
##  Positive 
## 0.2894494 
## 
## [[8]]
## 
## Emotion-Label 
##     0.4515534 
## Emotion-Laden 
##     0.3761919 
##       Neutral 
##     0.1722547 
## 
## [[9]]
## 
## Emotion-Label 
##     0.3337435 
## Emotion-Laden 
##     0.3761919 
##       Neutral 
##     0.2900646 
## 
## [[10]]
## 
## Emotion-Label 
##     0.3977238 
## Emotion-Laden 
##     0.3869579 
##       Neutral 
##     0.2153184 
## 
## [[11]]
## 
## Non-Scoring 
##   0.6807136 
##     Scoring 
##   0.3192864 
## 
## [[12]]
## 
## Non-Scoring 
##    0.762227 
##     Scoring 
##    0.237773 
## 
## [[13]]
## 
## Non-Scoring 
##   0.5921255 
##     Scoring 
##   0.4078745 
## 
## [[14]]
## 
## Non-Scoring 
##   0.4318671 
##     Scoring 
##   0.5681329 
## 
## [[15]]
## 
## Non-Scoring 
##   0.5647493 
##     Scoring 
##   0.4352507 
## 
## [[16]]
## 
## Non-Scoring 
##   0.5063058 
##     Scoring 
##   0.4936942 
## 
## [[17]]
## 
##  BDI & STAI 
##   0.1218087 
##    BDI Only 
##   0.1974777 
## Non-Scoring 
##   0.5647493 
##   STAI Only 
##   0.1159643 
## 
## [[18]]
## 
##  BDI & STAI 
##  0.23346663 
##    BDI Only 
##  0.08581975 
## Non-Scoring 
##  0.50630575 
##   STAI Only 
##  0.17440787 
## 
## [[19]]
## 
##  BDI & STAI 
##  0.23346663 
##    BDI Only 
##  0.08581975 
## Non-Scoring 
##  0.43186712 
##   STAI Only 
##  0.24884651 
## 
## [[20]]
## 
## BDI & STAI Y1 & STAI Y2 
##              0.12180867 
##           BDI & STAI Y2 
##              0.11165795 
##                BDI Only 
##              0.08581975 
##             Non-Scoring 
##              0.43186712 
##       STAI Y1 & STAI Y2 
##              0.04152568 
##            STAI Y1 Only 
##              0.07443863 
##            STAI Y2 Only 
##              0.13288219 
## 
## [[21]]
## 
##         18 
## 0.44017225 
##         19 
## 0.24454014 
##         20 
## 0.11442633 
##         21 
## 0.10489080 
##         22 
## 0.06736389 
##         23 
## 0.01291910 
##         25 
## 0.01568748 
## 
## [[22]]
## 
##     Female 
##  0.5924331 
##       Male 
##  0.3946478 
## Non-Binary 
##  0.0129191 
## 
## [[23]]
## 
##  Left-handed 
##   0.09597047 
## Right-handed 
##   0.90402953 
## 
## [[24]]
## 
## Hearing 
##       1 
## 
## [[25]]
## 
##    Match 
## 0.494617 
## Mismatch 
## 0.505383 
## 
## [[26]]
## 
## Emotion-Label 
##    0.20455245 
## Emotion-Laden 
##    0.18240541 
##    Mixed E.EL 
##    0.06459551 
##   Mixed E.NEU 
##    0.06459551 
##    Mixed EL.E 
##    0.08612735 
##  Mixed EL.NEU 
##    0.10765918 
##   Mixed NEU.E 
##    0.10704399 
##  Mixed NEU.EL 
##    0.13995694 
##       Neutral 
##    0.04306367 
## 
## [[27]]
## 
##        Congruent 
##        0.3869579 
##      Incongruent 
##        0.1937865 
## Pair 1 Congruent 
##        0.1507229 
## Pair 2 Congruent 
##        0.2685328 
## 
## [[28]]
## 
##        Congruent 
##        0.1073516 
##      Incongruent 
##        0.5807444 
## Pair 1 Congruent 
##        0.1184251 
## Pair 2 Congruent 
##        0.1934789 
## 
## [[29]]
## 
##        Negative.Congruent 
##                0.23685020 
##      Negative.Incongruent 
##                0.03229775 
## Negative.Pair 1 Congruent 
##                0.06459551 
## Negative.Pair 2 Congruent 
##                0.12919102 
##       Neutral.Incongruent 
##                0.15072285 
##  Neutral.Pair 2 Congruent 
##                0.02153184 
##        Positive.Congruent 
##                0.15010766 
##      Positive.Incongruent 
##                0.01076592 
## Positive.Pair 1 Congruent 
##                0.08612735 
## Positive.Pair 2 Congruent 
##                0.11780990 
## 
## [[30]]
## 
##        Emotion-Label.Congruent 
##                     0.06459551 
##      Emotion-Label.Incongruent 
##                     0.24730852 
## Emotion-Label.Pair 1 Congruent 
##                     0.05382959 
## Emotion-Label.Pair 2 Congruent 
##                     0.08581975 
##        Emotion-Laden.Congruent 
##                     0.04275608 
##      Emotion-Laden.Incongruent 
##                     0.18271301 
## Emotion-Laden.Pair 1 Congruent 
##                     0.06459551 
## Emotion-Laden.Pair 2 Congruent 
##                     0.08612735 
##            Neutral.Incongruent 
##                     0.15072285 
##       Neutral.Pair 2 Congruent 
##                     0.02153184 
## 
## [[31]]
## 
##      Primed.Match 
##         0.2045524 
##   Primed.Mismatch 
##         0.2688404 
##    Satiated.Match 
##         0.2900646 
## Satiated.Mismatch 
##         0.2365426 
## 
## [[32]]
##               vars
## Reaction.Time    1
## bdi.tot          2
## stai.y1.tot      3
## stai.y2.tot      4
## Age              5
##                  n
## Reaction.Time 3202
## bdi.tot       3251
## stai.y1.tot   3251
## stai.y2.tot   3251
## Age           3251
##                  mean
## Reaction.Time 1388.55
## bdi.tot         10.40
## stai.y1.tot     38.22
## stai.y2.tot     41.65
## Age             19.23
##                   sd
## Reaction.Time 461.40
## bdi.tot         8.25
## stai.y1.tot     9.07
## stai.y2.tot    10.08
## Age             1.52
##               median
## Reaction.Time   1300
## bdi.tot            8
## stai.y1.tot       37
## stai.y2.tot       40
## Age               19
##               trimmed
## Reaction.Time 1341.71
## bdi.tot          9.30
## stai.y1.tot     37.10
## stai.y2.tot     41.12
## Age             18.97
##                  mad
## Reaction.Time 430.70
## bdi.tot         7.41
## stai.y1.tot     5.93
## stai.y2.tot    10.38
## Age             1.48
##               min
## Reaction.Time 539
## bdi.tot         0
## stai.y1.tot    24
## stai.y2.tot    22
## Age            18
##                max
## Reaction.Time 2999
## bdi.tot         38
## stai.y1.tot     62
## stai.y2.tot     67
## Age             25
##               range
## Reaction.Time  2460
## bdi.tot          38
## stai.y1.tot      38
## stai.y2.tot      45
## Age               7
##               skew
## Reaction.Time 0.90
## bdi.tot       1.20
## stai.y1.tot   0.96
## stai.y2.tot   0.50
## Age           1.42
##               kurtosis
## Reaction.Time     0.53
## bdi.tot           1.22
## stai.y1.tot       0.36
## stai.y2.tot      -0.43
## Age               1.92
##                 se
## Reaction.Time 8.15
## bdi.tot       0.14
## stai.y1.tot   0.16
## stai.y2.tot   0.18
## Age           0.03
#  Descriptive summaries: removed-outlier, individually-handled data set
desc.pipe(sl.removed.ind)
## [[1]]
## 
##       n_a 
## 0.2552083 
##       n_b 
## 0.2552083 
##    pilota 
## 0.2460938 
##    pilotb 
## 0.2434896 
## 
## [[2]]
## 
##     Match 
## 0.7721354 
##  Mismatch 
## 0.2278646 
## 
## [[3]]
## 
##         C 
## 0.7434896 
##         E 
## 0.2252604 
##        NR 
## 0.0312500 
## 
## [[4]]
## 
##    Primed 
## 0.4934896 
##  Satiated 
## 0.5065104 
## 
## [[5]]
## 
##  Negative 
## 0.3723958 
##   Neutral 
## 0.4140625 
##  Positive 
## 0.2135417 
## 
## [[6]]
## 
##  Negative 
## 0.4713542 
##   Neutral 
## 0.2682292 
##  Positive 
## 0.2604167 
## 
## [[7]]
## 
##  Negative 
## 0.4179688 
##   Neutral 
## 0.2096354 
##  Positive 
## 0.3723958 
## 
## [[8]]
## 
## Emotion-Label 
##     0.2382812 
## Emotion-Laden 
##     0.3476562 
##       Neutral 
##     0.4140625 
## 
## [[9]]
## 
## Emotion-Label 
##     0.3619792 
## Emotion-Laden 
##     0.3697917 
##       Neutral 
##     0.2682292 
## 
## [[10]]
## 
## Emotion-Label 
##     0.3658854 
## Emotion-Laden 
##     0.4244792 
##       Neutral 
##     0.2096354 
## 
## [[11]]
## 
## Non-Scoring 
##   0.6367188 
##     Scoring 
##   0.3632812 
## 
## [[12]]
## 
## Non-Scoring 
##   0.7408854 
##     Scoring 
##   0.2591146 
## 
## [[13]]
## 
## Non-Scoring 
##   0.5572917 
##     Scoring 
##   0.4427083 
## 
## [[14]]
## 
## Non-Scoring 
##   0.3841146 
##     Scoring 
##   0.6158854 
## 
## [[15]]
## 
## Non-Scoring 
##   0.5117188 
##     Scoring 
##   0.4882812 
## 
## [[16]]
## 
## Non-Scoring 
##   0.4544271 
##     Scoring 
##   0.5455729 
## 
## [[17]]
## 
##  BDI & STAI 
##   0.1341146 
##    BDI Only 
##   0.2291667 
## Non-Scoring 
##   0.5117188 
##   STAI Only 
##   0.1250000 
## 
## [[18]]
## 
##  BDI & STAI 
##   0.2604167 
##    BDI Only 
##   0.1028646 
## Non-Scoring 
##   0.4544271 
##   STAI Only 
##   0.1822917 
## 
## [[19]]
## 
##  BDI & STAI 
##   0.2604167 
##    BDI Only 
##   0.1028646 
## Non-Scoring 
##   0.3841146 
##   STAI Only 
##   0.2526042 
## 
## [[20]]
## 
## BDI & STAI Y1 & STAI Y2 
##               0.1341146 
##           BDI & STAI Y2 
##               0.1263021 
##                BDI Only 
##               0.1028646 
##             Non-Scoring 
##               0.3841146 
##       STAI Y1 & STAI Y2 
##               0.0546875 
##            STAI Y1 Only 
##               0.0703125 
##            STAI Y2 Only 
##               0.1276042 
## 
## [[21]]
## 
##         18 
## 0.44401042 
##         19 
## 0.22916667 
##         20 
## 0.10026042 
##         21 
## 0.10937500 
##         22 
## 0.07942708 
##         23 
## 0.01953125 
##         25 
## 0.01822917 
## 
## [[22]]
## 
##     Female 
##  0.5898438 
##       Male 
##  0.3945312 
## Non-Binary 
##  0.0156250 
## 
## [[23]]
## 
##  Left-handed 
##    0.1132812 
## Right-handed 
##    0.8867188 
## 
## [[24]]
## 
## Hearing 
##       1 
## 
## [[25]]
## 
##     Match 
## 0.7721354 
##  Mismatch 
## 0.2278646 
## 
## [[26]]
## 
## Emotion-Label 
##   0.250000000 
## Emotion-Laden 
##   0.186197917 
##    Mixed E.EL 
##   0.100260417 
##   Mixed E.NEU 
##   0.011718750 
##    Mixed EL.E 
##   0.110677083 
##  Mixed EL.NEU 
##   0.072916667 
##   Mixed NEU.E 
##   0.005208333 
##  Mixed NEU.EL 
##   0.138020833 
##       Neutral 
##   0.125000000 
## 
## [[27]]
## 
##        Congruent 
##       0.60937500 
##      Incongruent 
##       0.16276042 
## Pair 1 Congruent 
##       0.16406250 
## Pair 2 Congruent 
##       0.06380208 
## 
## [[28]]
## 
##        Congruent 
##       0.17447917 
##      Incongruent 
##       0.51302083 
## Pair 1 Congruent 
##       0.23307292 
## Pair 2 Congruent 
##       0.07942708 
## 
## [[29]]
## 
##        Negative.Congruent 
##               0.369791667 
##      Negative.Incongruent 
##               0.001302083 
## Negative.Pair 1 Congruent 
##               0.001302083 
##         Neutral.Congruent 
##               0.096354167 
##       Neutral.Incongruent 
##               0.134114583 
##  Neutral.Pair 1 Congruent 
##               0.131510417 
##  Neutral.Pair 2 Congruent 
##               0.052083333 
##        Positive.Congruent 
##               0.143229167 
##      Positive.Incongruent 
##               0.027343750 
## Positive.Pair 1 Congruent 
##               0.031250000 
## Positive.Pair 2 Congruent 
##               0.011718750 
## 
## [[30]]
## 
##        Emotion-Label.Congruent 
##                     0.03645833 
##      Emotion-Label.Incongruent 
##                     0.16406250 
## Emotion-Label.Pair 1 Congruent 
##                     0.02343750 
## Emotion-Label.Pair 2 Congruent 
##                     0.01432292 
##        Emotion-Laden.Congruent 
##                     0.04166667 
##      Emotion-Laden.Incongruent 
##                     0.21484375 
## Emotion-Laden.Pair 1 Congruent 
##                     0.07812500 
## Emotion-Laden.Pair 2 Congruent 
##                     0.01302083 
##              Neutral.Congruent 
##                     0.09635417 
##            Neutral.Incongruent 
##                     0.13411458 
##       Neutral.Pair 1 Congruent 
##                     0.13151042 
##       Neutral.Pair 2 Congruent 
##                     0.05208333 
## 
## [[31]]
## 
##      Primed.Match 
##        0.42187500 
##   Primed.Mismatch 
##        0.07161458 
##    Satiated.Match 
##        0.35026042 
## Satiated.Mismatch 
##        0.15625000 
## 
## [[32]]
##               vars
## Reaction.Time    1
## bdi.tot          2
## stai.y1.tot      3
## stai.y2.tot      4
## Age              5
##                 n
## Reaction.Time 744
## bdi.tot       768
## stai.y1.tot   768
## stai.y2.tot   768
## Age           768
##                  mean
## Reaction.Time 1442.59
## bdi.tot         11.24
## stai.y1.tot     38.67
## stai.y2.tot     42.32
## Age             19.30
##                   sd
## Reaction.Time 485.27
## bdi.tot         8.68
## stai.y1.tot     8.91
## stai.y2.tot    10.50
## Age             1.61
##               median
## Reaction.Time   1362
## bdi.tot            8
## stai.y1.tot       37
## stai.y2.tot       40
## Age               19
##               trimmed
## Reaction.Time 1404.69
## bdi.tot         10.25
## stai.y1.tot     37.70
## stai.y2.tot     41.76
## Age             19.04
##                  mad
## Reaction.Time 500.38
## bdi.tot         8.90
## stai.y1.tot     5.93
## stai.y2.tot    10.38
## Age             1.48
##               min
## Reaction.Time 574
## bdi.tot         0
## stai.y1.tot    24
## stai.y2.tot    22
## Age            18
##                max
## Reaction.Time 2945
## bdi.tot         38
## stai.y1.tot     62
## stai.y2.tot     67
## Age             25
##               range
## Reaction.Time  2371
## bdi.tot          38
## stai.y1.tot      38
## stai.y2.tot      45
## Age               7
##               skew
## Reaction.Time 0.63
## bdi.tot       1.02
## stai.y1.tot   0.87
## stai.y2.tot   0.48
## Age           1.33
##               kurtosis
## Reaction.Time    -0.19
## bdi.tot           0.56
## stai.y1.tot       0.19
## stai.y2.tot      -0.58
## Age               1.41
##                  se
## Reaction.Time 17.79
## bdi.tot        0.31
## stai.y1.tot    0.32
## stai.y2.tot    0.38
## Age            0.06

13 Accuracy Analyses

#  Mean reaction time per participant (Session.ID), ignoring missing RTs.
#  Used below to attach an RT column to the per-participant accuracy tables.
rt.participant <- sl.trimmed %>%
  group_by(Session.ID) %>%
  summarise(Reaction.Time = mean(Reaction.Time, na.rm = TRUE)) %>%
  data.frame()

#  Accuracy By Participant Pipes
#  Accuracy By Participant Pipes
#  Builds a per-participant accuracy table: counts of each Error.Code
#  (C = correct, E = error, NR = no response) per Session.ID, merged with
#  that participant's mean reaction time (global `rt.participant`), plus:
#    Response.Rate = proportion of trials with any response
#    Accuracy      = proportion correct among responded trials
#  NOTE(review): relies on `rt.participant` existing in the calling
#  environment; confirm it is built before this pipe runs.
accuracy.df.pipe <- function(sldf) {
  sldf %>%
    select(Session.ID, Error.Code) %>%
    table() %>%
    data.frame() %>%
    recast(formula = Session.ID ~ Error.Code, id.var = c("Session.ID", "Error.Code")) %>%
    merge(rt.participant, by = "Session.ID") %>%
    mutate(
      Response.Rate = (C + E) / (C + E + NR),
      Accuracy = C / (C + E)
    )
}
#  Diagnostic visuals for a per-participant accuracy table: descriptive
#  statistics, histograms and boxplots of accuracy / response rate /
#  reaction time, and a correlation matrix of the numeric columns (2:7).
#  Called for its plotting side effects; also returns the pieces as a list.
accuracy.vis.pipe <- function(sldf.accuracy) {
  list(
    formattable(describe(sldf.accuracy)),
    plot_histogram(sldf.accuracy$Accuracy),
    plot_histogram(sldf.accuracy$Response.Rate),
    plot_histogram(sldf.accuracy$Reaction.Time),
    boxplot(sldf.accuracy$Response.Rate),
    boxplot(sldf.accuracy$Accuracy),
    boxplot(sldf.accuracy$Reaction.Time),
    formattable(cor(sldf.accuracy[2:7]))
  )
}
#  Per-trial accuracy: counts of each Error.Code per Trial.ID, with the
#  derived response rate and accuracy columns. Returns a formatted table
#  and a histogram of trial-level accuracy.
accuracy.trial.pipe <- function(sldf) {
  acc.trial <- sldf %>%
    recast(formula = Trial.ID ~ Error.Code, id.var = c("Trial.ID", "Error.Code")) %>%
    mutate(
      Response.Rate = (C + E) / (C + E + NR),
      Accuracy = C / (C + E)
    )
  list(
    formattable(acc.trial),
    plot_histogram(acc.trial$Accuracy)
  )
}

#  Apply Pipes to Data Frames

#  For each data set: build the accuracy table, then tee (%T>%) into the
#  visual pipe -- the plots run for their side effects while the accuracy
#  data frame (not the plot list) is what gets assigned.
acc.sl.trimmed <- sl.trimmed %>%
  accuracy.df.pipe(.) %T>%
  accuracy.vis.pipe(.)

acc.sl.recoded.avg <- sl.recoded.avg %>%
  accuracy.df.pipe(.) %T>%
  accuracy.vis.pipe(.)

acc.sl.recoded.ind <- sl.recoded.ind %>%
  accuracy.df.pipe(.) %T>%
  accuracy.vis.pipe(.)

acc.sl.removed.avg <- sl.removed.avg %>%
  accuracy.df.pipe(.) %T>%
  accuracy.vis.pipe(.)

acc.sl.removed.ind <- sl.removed.ind %>%
  accuracy.df.pipe(.) %T>%
  accuracy.vis.pipe(.)

#  Trial-level accuracy tables and histograms for each data set
accuracy.trial.pipe(sl.trimmed)

## [[1]]
## 
## [[2]]
## [[2]]$page_1

accuracy.trial.pipe(sl.recoded.avg)

## [[1]]
## 
## [[2]]
## [[2]]$page_1

accuracy.trial.pipe(sl.recoded.ind)

## [[1]]
## 
## [[2]]
## [[2]]$page_1

accuracy.trial.pipe(sl.removed.avg)

## [[1]]
## 
## [[2]]
## [[2]]$page_1

accuracy.trial.pipe(sl.removed.ind)

## [[1]]
## 
## [[2]]
## [[2]]$page_1

14 ANOVAs

#aov.pipe for the main anova analyses
#  aov.pipe: the suite of jmv ANOVAs for the main analyses.
#  Every model shares one option set (eta / partial-eta effect sizes,
#  homogeneity + normality checks with QQ plots, Tukey & Scheffe post hocs,
#  estimated marginal means); the five models differ only in the RHS of the
#  formula and the data subset, so a local helper removes the duplication.
#  Returns a named list of jmv ANOVA results:
#    planned3 - 3-way, non-depressed (bdi Non-Scoring) correct trials only
#    wt3      - bdi x priming x word type, correct trials
#    v3       - bdi x priming x valence congruency, correct trials
#    rta4     - full 4-way, all trials
#    rtc4     - full 4-way, correct trials only
aov.pipe <- function(sldf) {
  #  Fit one ANOVA of Reaction.Time on `rhs` (character RHS of the formula)
  #  with the shared option set; post hocs and EMMs use the same terms.
  run.anova <- function(rhs, data) {
    jmv::ANOVA(
      formula = as.formula(paste("Reaction.Time ~", rhs)),
      data = data,
      effectSize = c("eta", "partEta"),
      homo = TRUE,
      norm = TRUE,
      qq = TRUE,
      postHoc = as.formula(paste("~", rhs)),
      postHocCorr = c("tukey", "scheffe"),
      emMeans = as.formula(paste("~", rhs)),
      emmTables = TRUE
    )
  }
  #  Correct-response trials, reused by four of the five models
  correct <- dplyr::filter(sldf, Error.Code == "C")
  list(
    planned3 = run.anova(
      "X.Primed.Satiated * Word.Type.n * Valence.Congruency.n",
      dplyr::filter(sldf, bdi == "Non-Scoring" & Error.Code == "C")
    ),
    wt3 = run.anova("bdi * X.Primed.Satiated * Target.WT", correct),
    v3 = run.anova("bdi * X.Primed.Satiated * Valence.Congruency.n", correct),
    rta4 = run.anova(
      "bdi * X.Primed.Satiated * Word.Type.n * Valence.Congruency.n",
      sldf
    ),
    rtc4 = run.anova(
      "bdi * X.Primed.Satiated * Word.Type.n * Valence.Congruency.n",
      correct
    )
  )
}

#    aov.pipe(sl.trimmed)
#aov.pipe(sl.recoded.avg)
#  aov.pipe(sl.recoded.ind) # singular
#  aov.pipe(sl.removed.avg) # Too many missing values, imbalanced, disregard
#  aov.pipe(sl.removed.ind) # Too many missing values, imbalanced, disregard

15 Linear Models

#  Regression data set: trimmed trial data joined (by Target word) with the
#  word-level valence ratings and arousal ratings, then renamed to separate
#  the .x (valence) and .y (arousal) merge suffixes, plus absolute distance
#  of each mean rating from the 4.5 scale midpoint.
sl.regression <- sl.trimmed %>%
  merge(sam.cat.comp.val[c("Target", "Rated.Mean", "ANEW.Mean", "Difference", "Rated.Category", "ANEW.Category")], by = "Target") %>%
  merge(sam.comp.ar[c("Target", "Rated.Mean", "ANEW.Mean", "Difference")], by = "Target") %>%
  rename(
    "Valence.Rated.Mean" = Rated.Mean.x,
    "Valence.ANEW.Mean" = ANEW.Mean.x,
    "Valence.Difference" = Difference.x,
    "Arousal.Rated.Mean" = Rated.Mean.y,
    "Arousal.ANEW.Mean" = ANEW.Mean.y,
    "Arousal.Difference" = Difference.y
  ) %>%
  mutate(
    Valence.ANEW.abs = abs(Valence.ANEW.Mean - 4.5),
    Valence.Rated.abs = abs(Valence.Rated.Mean - 4.5),
    Arousal.ANEW.abs = abs(Arousal.ANEW.Mean - 4.5),
    Arousal.Rated.abs = abs(Arousal.Rated.Mean - 4.5)
  )

#write.csv(sl.regression, file = "sl.regression.csv")

#  Full model: reaction time predicted by every valence/arousal measure
#  (raw means and midpoint distances), the three mood scales, the
#  priming condition, and target word type.  Gaussian family (default),
#  so this is an ordinary linear regression fit via glm().
big.glm <- glm(
  Reaction.Time ~ Valence.ANEW.Mean + Arousal.ANEW.Mean +
    Valence.Rated.Mean + Arousal.Rated.Mean +
    Valence.ANEW.abs + Arousal.ANEW.abs +
    Valence.Rated.abs + Arousal.Rated.abs +
    bdi.tot + stai.y1.tot + stai.y2.tot +
    X.Primed.Satiated + Target.WT,
  data = sl.regression
)
#  Reduced model: a minimal predictor set (one valence measure, two mood
#  scales, condition, word type) for comparison against big.glm.
baby.glm <- glm(
  Reaction.Time ~ Valence.ANEW.Mean +
    bdi.tot +
    stai.y1.tot +
    X.Primed.Satiated +
    Target.WT,
  data = sl.regression
)

# Print the full model: coefficients, degrees of freedom, deviance, AIC.
big.glm
## 
## Call:  glm(formula = Reaction.Time ~ Valence.ANEW.Mean + Arousal.ANEW.Mean + 
##     Valence.Rated.Mean + Arousal.Rated.Mean + Valence.ANEW.abs + 
##     Arousal.ANEW.abs + Valence.Rated.abs + Arousal.Rated.abs + 
##     bdi.tot + stai.y1.tot + stai.y2.tot + X.Primed.Satiated + 
##     Target.WT, data = sl.regression)
## 
## Coefficients:
##               (Intercept)  
##                   939.569  
##         Valence.ANEW.Mean  
##                    19.582  
##         Arousal.ANEW.Mean  
##                    41.213  
##        Valence.Rated.Mean  
##                   -29.960  
##        Arousal.Rated.Mean  
##                    15.824  
##          Valence.ANEW.abs  
##                    42.039  
##          Arousal.ANEW.abs  
##                   -76.882  
##         Valence.Rated.abs  
##                   -29.946  
##         Arousal.Rated.abs  
##                    -5.260  
##                   bdi.tot  
##                     3.081  
##               stai.y1.tot  
##                     3.573  
##               stai.y2.tot  
##                     1.675  
## X.Primed.SatiatedSatiated  
##                    19.780  
##    Target.WTEmotion-Laden  
##                    -4.127  
##          Target.WTNeutral  
##                   238.829  
## 
## Degrees of Freedom: 5355 Total (i.e. Null);  5341 Residual
##   (99 observations deleted due to missingness)
## Null Deviance:       1.189e+09 
## Residual Deviance: 1.112e+09     AIC: 80810
# Coefficient table (estimates, SEs, t values, p values) for the full model.
summary(big.glm)
## 
## Call:
## glm(formula = Reaction.Time ~ Valence.ANEW.Mean + Arousal.ANEW.Mean + 
##     Valence.Rated.Mean + Arousal.Rated.Mean + Valence.ANEW.abs + 
##     Arousal.ANEW.abs + Valence.Rated.abs + Arousal.Rated.abs + 
##     bdi.tot + stai.y1.tot + stai.y2.tot + X.Primed.Satiated + 
##     Target.WT, data = sl.regression)
## 
## Deviance Residuals: 
##     Min  
## -1014.0  
##      1Q  
##  -337.1  
##  Median  
##   -87.9  
##      3Q  
##   267.6  
##     Max  
##  1607.8  
## 
## Coefficients:
##                           Estimate
## (Intercept)               939.5691
## Valence.ANEW.Mean          19.5816
## Arousal.ANEW.Mean          41.2126
## Valence.Rated.Mean        -29.9604
## Arousal.Rated.Mean         15.8244
## Valence.ANEW.abs           42.0389
## Arousal.ANEW.abs          -76.8815
## Valence.Rated.abs         -29.9461
## Arousal.Rated.abs          -5.2604
## bdi.tot                     3.0806
## stai.y1.tot                 3.5726
## stai.y2.tot                 1.6750
## X.Primed.SatiatedSatiated  19.7797
## Target.WTEmotion-Laden     -4.1269
## Target.WTNeutral          238.8287
##                           Std. Error
## (Intercept)                 116.8952
## Valence.ANEW.Mean             7.0821
## Arousal.ANEW.Mean            26.0015
## Valence.Rated.Mean            7.1823
## Arousal.Rated.Mean           11.6970
## Valence.ANEW.abs             17.5215
## Arousal.ANEW.abs             27.6147
## Valence.Rated.abs            11.3341
## Arousal.Rated.abs            13.6025
## bdi.tot                       1.0917
## stai.y1.tot                   0.8185
## stai.y2.tot                   0.9018
## X.Primed.SatiatedSatiated    12.5953
## Target.WTEmotion-Laden       15.1841
## Target.WTNeutral             43.3184
##                           t value
## (Intercept)                 8.038
## Valence.ANEW.Mean           2.765
## Arousal.ANEW.Mean           1.585
## Valence.Rated.Mean         -4.171
## Arousal.Rated.Mean          1.353
## Valence.ANEW.abs            2.399
## Arousal.ANEW.abs           -2.784
## Valence.Rated.abs          -2.642
## Arousal.Rated.abs          -0.387
## bdi.tot                     2.822
## stai.y1.tot                 4.365
## stai.y2.tot                 1.857
## X.Primed.SatiatedSatiated   1.570
## Target.WTEmotion-Laden     -0.272
## Target.WTNeutral            5.513
##                           Pr(>|t|)
## (Intercept)               1.12e-15
## Valence.ANEW.Mean          0.00571
## Arousal.ANEW.Mean          0.11302
## Valence.Rated.Mean        3.08e-05
## Arousal.Rated.Mean         0.17616
## Valence.ANEW.abs           0.01646
## Arousal.ANEW.abs           0.00539
## Valence.Rated.abs          0.00826
## Arousal.Rated.abs          0.69898
## bdi.tot                    0.00479
## stai.y1.tot               1.30e-05
## stai.y2.tot                0.06331
## X.Primed.SatiatedSatiated  0.11638
## Target.WTEmotion-Laden     0.78579
## Target.WTNeutral          3.69e-08
##                              
## (Intercept)               ***
## Valence.ANEW.Mean         ** 
## Arousal.ANEW.Mean            
## Valence.Rated.Mean        ***
## Arousal.Rated.Mean           
## Valence.ANEW.abs          *  
## Arousal.ANEW.abs          ** 
## Valence.Rated.abs         ** 
## Arousal.Rated.abs            
## bdi.tot                   ** 
## stai.y1.tot               ***
## stai.y2.tot               .  
## X.Primed.SatiatedSatiated    
## Target.WTEmotion-Laden       
## Target.WTNeutral          ***
## ---
## Signif. codes:  
##   0  '***'
##   0.001  '**'
##   0.01  '*'
##   0.05  '.'
##   0.1 '  ' 1
## 
## (Dispersion parameter for gaussian family taken to be 208207.5)
## 
##     Null deviance: 1189058537  on 5355  degrees of freedom
## Residual deviance: 1112036168  on 5341  degrees of freedom
##   (99 observations deleted due to missingness)
## AIC: 80808
## 
## Number of Fisher Scoring iterations: 2
# jtools::summ(): compact model summary with pseudo-R-squared, AIC, and BIC.
summ(big.glm)
Observations 5356 (99 missing obs. deleted)
Dependent variable Reaction.Time
Type Linear regression
𝛘²(14) 77022369.58
Pseudo-R² (Cragg-Uhler) 0.06
Pseudo-R² (McFadden) 0.00
AIC 80807.78
BIC 80913.16
Est. S.E. t val. p
(Intercept) 939.57 116.90 8.04 0.00
Valence.ANEW.Mean 19.58 7.08 2.76 0.01
Arousal.ANEW.Mean 41.21 26.00 1.59 0.11
Valence.Rated.Mean -29.96 7.18 -4.17 0.00
Arousal.Rated.Mean 15.82 11.70 1.35 0.18
Valence.ANEW.abs 42.04 17.52 2.40 0.02
Arousal.ANEW.abs -76.88 27.61 -2.78 0.01
Valence.Rated.abs -29.95 11.33 -2.64 0.01
Arousal.Rated.abs -5.26 13.60 -0.39 0.70
bdi.tot 3.08 1.09 2.82 0.00
stai.y1.tot 3.57 0.82 4.36 0.00
stai.y2.tot 1.68 0.90 1.86 0.06
X.Primed.SatiatedSatiated 19.78 12.60 1.57 0.12
Target.WTEmotion-Laden -4.13 15.18 -0.27 0.79
Target.WTNeutral 238.83 43.32 5.51 0.00
Standard errors: MLE
# MASS::stepAIC(): backward stepwise term deletion by AIC, starting from
# the full model.  Output below shows each deletion step.
stepAIC(big.glm)
## Start:  AIC=80807.78
## Reaction.Time ~ Valence.ANEW.Mean + Arousal.ANEW.Mean + Valence.Rated.Mean + 
##     Arousal.Rated.Mean + Valence.ANEW.abs + Arousal.ANEW.abs + 
##     Valence.Rated.abs + Arousal.Rated.abs + bdi.tot + stai.y1.tot + 
##     stai.y2.tot + X.Primed.Satiated + Target.WT
## 
##                      Df
## - Arousal.Rated.abs   1
## - Arousal.Rated.Mean  1
## <none>                 
## - X.Primed.Satiated   1
## - Arousal.ANEW.Mean   1
## - stai.y2.tot         1
## - Valence.ANEW.abs    1
## - Valence.Rated.abs   1
## - Valence.ANEW.Mean   1
## - Arousal.ANEW.abs    1
## - bdi.tot             1
## - Valence.Rated.Mean  1
## - stai.y1.tot         1
## - Target.WT           2
##                        Deviance
## - Arousal.Rated.abs  1112067306
## - Arousal.Rated.Mean 1112417238
## <none>               1112036168
## - X.Primed.Satiated  1112549638
## - Arousal.ANEW.Mean  1112559239
## - stai.y2.tot        1112754475
## - Valence.ANEW.abs   1113234721
## - Valence.Rated.abs  1113489637
## - Valence.ANEW.Mean  1113627906
## - Arousal.ANEW.abs   1113650000
## - bdi.tot            1113694158
## - Valence.Rated.Mean 1115659127
## - stai.y1.tot        1116002457
## - Target.WT          1118684352
##                        AIC
## - Arousal.Rated.abs  80806
## - Arousal.Rated.Mean 80808
## <none>               80808
## - X.Primed.Satiated  80808
## - Arousal.ANEW.Mean  80808
## - stai.y2.tot        80809
## - Valence.ANEW.abs   80812
## - Valence.Rated.abs  80813
## - Valence.ANEW.Mean  80813
## - Arousal.ANEW.abs   80814
## - bdi.tot            80814
## - Valence.Rated.Mean 80823
## - stai.y1.tot        80825
## - Target.WT          80836
## 
## Step:  AIC=80805.93
## Reaction.Time ~ Valence.ANEW.Mean + Arousal.ANEW.Mean + Valence.Rated.Mean + 
##     Arousal.Rated.Mean + Valence.ANEW.abs + Arousal.ANEW.abs + 
##     Valence.Rated.abs + bdi.tot + stai.y1.tot + stai.y2.tot + 
##     X.Primed.Satiated + Target.WT
## 
##                      Df
## - Arousal.Rated.Mean  1
## <none>                 
## - X.Primed.Satiated   1
## - Arousal.ANEW.Mean   1
## - stai.y2.tot         1
## - Valence.ANEW.abs    1
## - Valence.Rated.abs   1
## - bdi.tot             1
## - Valence.ANEW.Mean   1
## - Arousal.ANEW.abs    1
## - stai.y1.tot         1
## - Valence.Rated.Mean  1
## - Target.WT           2
##                        Deviance
## - Arousal.Rated.Mean 1112468932
## <none>               1112067306
## - X.Primed.Satiated  1112583463
## - Arousal.ANEW.Mean  1112722828
## - stai.y2.tot        1112785246
## - Valence.ANEW.abs   1113260550
## - Valence.Rated.abs  1113522993
## - bdi.tot            1113727543
## - Valence.ANEW.Mean  1113887516
## - Arousal.ANEW.abs   1114007469
## - stai.y1.tot        1116032788
## - Valence.Rated.Mean 1116086809
## - Target.WT          1118715489
##                        AIC
## - Arousal.Rated.Mean 80806
## <none>               80806
## - X.Primed.Satiated  80806
## - Arousal.ANEW.Mean  80807
## - stai.y2.tot        80807
## - Valence.ANEW.abs   80810
## - Valence.Rated.abs  80811
## - bdi.tot            80812
## - Valence.ANEW.Mean  80813
## - Arousal.ANEW.abs   80813
## - stai.y1.tot        80823
## - Valence.Rated.Mean 80823
## - Target.WT          80834
## 
## Step:  AIC=80805.86
## Reaction.Time ~ Valence.ANEW.Mean + Arousal.ANEW.Mean + Valence.Rated.Mean + 
##     Valence.ANEW.abs + Arousal.ANEW.abs + Valence.Rated.abs + 
##     bdi.tot + stai.y1.tot + stai.y2.tot + X.Primed.Satiated + 
##     Target.WT
## 
##                      Df
## <none>                 
## - X.Primed.Satiated   1
## - stai.y2.tot         1
## - Valence.Rated.abs   1
## - Valence.ANEW.abs    1
## - Arousal.ANEW.Mean   1
## - bdi.tot             1
## - Valence.ANEW.Mean   1
## - Arousal.ANEW.abs    1
## - Valence.Rated.Mean  1
## - stai.y1.tot         1
## - Target.WT           2
##                        Deviance
## <none>               1112468932
## - X.Primed.Satiated  1112954348
## - stai.y2.tot        1113188489
## - Valence.Rated.abs  1113525372
## - Valence.ANEW.abs   1113574836
## - Arousal.ANEW.Mean  1113704749
## - bdi.tot            1114123158
## - Valence.ANEW.Mean  1114277007
## - Arousal.ANEW.abs   1114513167
## - Valence.Rated.Mean 1116414738
## - stai.y1.tot        1116436431
## - Target.WT          1119551200
##                        AIC
## <none>               80806
## - X.Primed.Satiated  80806
## - stai.y2.tot        80807
## - Valence.Rated.abs  80809
## - Valence.ANEW.abs   80809
## - Arousal.ANEW.Mean  80810
## - bdi.tot            80812
## - Valence.ANEW.Mean  80813
## - Arousal.ANEW.abs   80814
## - Valence.Rated.Mean 80823
## - stai.y1.tot        80823
## - Target.WT          80836
## 
## Call:  glm(formula = Reaction.Time ~ Valence.ANEW.Mean + Arousal.ANEW.Mean + 
##     Valence.Rated.Mean + Valence.ANEW.abs + Arousal.ANEW.abs + 
##     Valence.Rated.abs + bdi.tot + stai.y1.tot + stai.y2.tot + 
##     X.Primed.Satiated + Target.WT, data = sl.regression)
## 
## Coefficients:
##               (Intercept)  
##                  914.2974  
##         Valence.ANEW.Mean  
##                   20.2010  
##         Arousal.ANEW.Mean  
##                   56.5557  
##        Valence.Rated.Mean  
##                  -30.3329  
##          Valence.ANEW.abs  
##                   40.2834  
##          Arousal.ANEW.abs  
##                  -82.1774  
##         Valence.Rated.abs  
##                  -21.1724  
##                   bdi.tot  
##                    3.0771  
##               stai.y1.tot  
##                    3.5731  
##               stai.y2.tot  
##                    1.6765  
## X.Primed.SatiatedSatiated  
##                   19.2189  
##    Target.WTEmotion-Laden  
##                    0.4267  
##          Target.WTNeutral  
##                  247.5221  
## 
## Degrees of Freedom: 5355 Total (i.e. Null);  5343 Residual
##   (99 observations deleted due to missingness)
## Null Deviance:       1.189e+09 
## Residual Deviance: 1.112e+09     AIC: 80810
# Print the reduced model: coefficients, degrees of freedom, deviance, AIC.
baby.glm
## 
## Call:  glm(formula = Reaction.Time ~ Valence.ANEW.Mean + bdi.tot + stai.y1.tot + 
##     X.Primed.Satiated + Target.WT, data = sl.regression)
## 
## Coefficients:
##               (Intercept)  
##                 1179.5130  
##         Valence.ANEW.Mean  
##                   -0.9706  
##                   bdi.tot  
##                    4.2605  
##               stai.y1.tot  
##                    4.0200  
## X.Primed.SatiatedSatiated  
##                   16.5076  
##    Target.WTEmotion-Laden  
##                   13.1824  
##          Target.WTNeutral  
##                  220.3413  
## 
## Degrees of Freedom: 5355 Total (i.e. Null);  5349 Residual
##   (99 observations deleted due to missingness)
## Null Deviance:       1.189e+09 
## Residual Deviance: 1.121e+09     AIC: 80840
# Coefficient table (estimates, SEs, t values, p values) for the reduced model.
summary(baby.glm)
## 
## Call:
## glm(formula = Reaction.Time ~ Valence.ANEW.Mean + bdi.tot + stai.y1.tot + 
##     X.Primed.Satiated + Target.WT, data = sl.regression)
## 
## Deviance Residuals: 
##     Min  
## -953.32  
##      1Q  
## -341.70  
##  Median  
##  -88.84  
##      3Q  
##  263.57  
##     Max  
## 1622.70  
## 
## Coefficients:
##                            Estimate
## (Intercept)               1179.5130
## Valence.ANEW.Mean           -0.9706
## bdi.tot                      4.2605
## stai.y1.tot                  4.0200
## X.Primed.SatiatedSatiated   16.5076
## Target.WTEmotion-Laden      13.1824
## Target.WTNeutral           220.3413
##                           Std. Error
## (Intercept)                  32.1603
## Valence.ANEW.Mean             2.7117
## bdi.tot                       0.8775
## stai.y1.tot                   0.7857
## X.Primed.SatiatedSatiated    12.5130
## Target.WTEmotion-Laden       14.5067
## Target.WTNeutral             16.0899
##                           t value
## (Intercept)                36.676
## Valence.ANEW.Mean          -0.358
## bdi.tot                     4.855
## stai.y1.tot                 5.116
## X.Primed.SatiatedSatiated   1.319
## Target.WTEmotion-Laden      0.909
## Target.WTNeutral           13.694
##                           Pr(>|t|)
## (Intercept)                < 2e-16
## Valence.ANEW.Mean            0.720
## bdi.tot                   1.24e-06
## stai.y1.tot               3.23e-07
## X.Primed.SatiatedSatiated    0.187
## Target.WTEmotion-Laden       0.364
## Target.WTNeutral           < 2e-16
##                              
## (Intercept)               ***
## Valence.ANEW.Mean            
## bdi.tot                   ***
## stai.y1.tot               ***
## X.Primed.SatiatedSatiated    
## Target.WTEmotion-Laden       
## Target.WTNeutral          ***
## ---
## Signif. codes:  
##   0  '***'
##   0.001  '**'
##   0.01  '*'
##   0.05  '.'
##   0.1 '  ' 1
## 
## (Dispersion parameter for gaussian family taken to be 209642.2)
## 
##     Null deviance: 1189058537  on 5355  degrees of freedom
## Residual deviance: 1121376326  on 5349  degrees of freedom
##   (99 observations deleted due to missingness)
## AIC: 80837
## 
## Number of Fisher Scoring iterations: 2
# jtools::summ(): compact summary (pseudo-R-squared, AIC/BIC) for the reduced model.
summ(baby.glm)
Observations 5356 (99 missing obs. deleted)
Dependent variable Reaction.Time
Type Linear regression
𝛘²(6) 67682211.69
Pseudo-R² (Cragg-Uhler) 0.06
Pseudo-R² (McFadden) 0.00
AIC 80836.58
BIC 80889.27
Est. S.E. t val. p
(Intercept) 1179.51 32.16 36.68 0.00
Valence.ANEW.Mean -0.97 2.71 -0.36 0.72
bdi.tot 4.26 0.88 4.86 0.00
stai.y1.tot 4.02 0.79 5.12 0.00
X.Primed.SatiatedSatiated 16.51 12.51 1.32 0.19
Target.WTEmotion-Laden 13.18 14.51 0.91 0.36
Target.WTNeutral 220.34 16.09 13.69 0.00
Standard errors: MLE
# Backward stepwise AIC selection starting from the reduced model.
stepAIC(baby.glm)
## Start:  AIC=80836.58
## Reaction.Time ~ Valence.ANEW.Mean + bdi.tot + stai.y1.tot + X.Primed.Satiated + 
##     Target.WT
## 
##                     Df
## - Valence.ANEW.Mean  1
## - X.Primed.Satiated  1
## <none>                
## - bdi.tot            1
## - stai.y1.tot        1
## - Target.WT          2
##                       Deviance
## - Valence.ANEW.Mean 1121403184
## - X.Primed.Satiated 1121741188
## <none>              1121376326
## - bdi.tot           1126318536
## - stai.y1.tot       1126863767
## - Target.WT         1167979783
##                       AIC
## - Valence.ANEW.Mean 80835
## - X.Primed.Satiated 80836
## <none>              80837
## - bdi.tot           80858
## - stai.y1.tot       80861
## - Target.WT         81051
## 
## Step:  AIC=80834.71
## Reaction.Time ~ bdi.tot + stai.y1.tot + X.Primed.Satiated + Target.WT
## 
##                     Df
## - X.Primed.Satiated  1
## <none>                
## - bdi.tot            1
## - stai.y1.tot        1
## - Target.WT          2
##                       Deviance
## - X.Primed.Satiated 1121768986
## <none>              1121403184
## - bdi.tot           1126520427
## - stai.y1.tot       1126986727
## - Target.WT         1168099025
##                       AIC
## - X.Primed.Satiated 80834
## <none>              80835
## - bdi.tot           80857
## - stai.y1.tot       80859
## - Target.WT         81049
## 
## Step:  AIC=80834.45
## Reaction.Time ~ bdi.tot + stai.y1.tot + Target.WT
## 
##               Df
## <none>          
## - bdi.tot      1
## - stai.y1.tot  1
## - Target.WT    2
##                 Deviance
## <none>        1121768986
## - bdi.tot     1126879530
## - stai.y1.tot 1127354756
## - Target.WT   1168450724
##                 AIC
## <none>        80834
## - bdi.tot     80857
## - stai.y1.tot 80859
## - Target.WT   81049
## 
## Call:  glm(formula = Reaction.Time ~ bdi.tot + stai.y1.tot + Target.WT, 
##     data = sl.regression)
## 
## Coefficients:
##            (Intercept)  
##               1183.131  
##                bdi.tot  
##                  4.173  
##            stai.y1.tot  
##                  4.043  
## Target.WTEmotion-Laden  
##                 13.034  
##       Target.WTNeutral  
##                220.380  
## 
## Degrees of Freedom: 5355 Total (i.e. Null);  5351 Residual
##   (99 observations deleted due to missingness)
## Null Deviance:       1.189e+09 
## Residual Deviance: 1.122e+09     AIC: 80830
# Refit the model selected by stepAIC(baby.glm) above and show its
# compact summary.
summ(glm(formula = Reaction.Time ~ bdi.tot + stai.y1.tot + Target.WT, 
    data = sl.regression))
Observations 5356 (99 missing obs. deleted)
Dependent variable Reaction.Time
Type Linear regression
𝛘²(4) 67289551.40
Pseudo-R² (Cragg-Uhler) 0.06
Pseudo-R² (McFadden) 0.00
AIC 80834.45
BIC 80873.97
Est. S.E. t val. p
(Intercept) 1183.13 28.81 41.06 0.00
bdi.tot 4.17 0.85 4.94 0.00
stai.y1.tot 4.04 0.78 5.16 0.00
Target.WTEmotion-Laden 13.03 14.50 0.90 0.37
Target.WTNeutral 220.38 16.09 13.70 0.00
Standard errors: MLE

16 Graphics

#----
#  Marginal means (with CI bounds) for the Primed/Satiated main effect;
#  values transcribed from the ANOVA emmeans table -- TODO confirm source.
rm.exn.ps <- data.frame(
  Primed.Satiated = c("Primed", "Satiated"),
  Mean = c(1374.988, 1419.129),
  SE = c(34.774, 34.774),
  Lower = c(1305.742, 1349.882),
  Upper = c(1444.234, 1488.375)
)

ggplot(rm.exn.ps, aes(x = Primed.Satiated, y = Mean)) +
  geom_point(size = 3, shape = 20) +
  geom_errorbar(aes(ymin = Lower, ymax = Upper), size = .5, width = .13) +
  ylab("Mean Reaction Time (ms)") +
  # endpoints suffice; 0:2000 allocated a 2001-element vector for the same limits
  expand_limits(y = c(0, 2000)) +
  scale_y_continuous(expand = c(0, 0)) +
  ggtitle("Mean Reaction Time by Primed/Satiated") +
  theme_grey() +
  theme(rect = element_rect(fill = "transparent")) +
  theme(
    text = element_text(size = 20),
    axis.text.x = element_text(angle = 20, hjust = 1)
  )

#----
#  Cell means and SDs for Word Type x Primed/Satiated x Participant Group;
#  SE assumes n = 35 per cell -- TODO confirm cell sizes.
#  Bare column names replace the fragile `.$` pronoun inside mutate(), and
#  the two sequential mutate() calls are collapsed into one (later
#  arguments may reference columns created by earlier ones).
rm.exn.wt.ps.val <- data.frame(
  Word.Type = c(rep("Emotion-label", 4), rep("Emotion-laden", 4)),
  Primed.Satiated = c(rep(c("Primed", "Primed", "Satiated", "Satiated"), 2)),
  Participant.Group = c(rep(c("Negative", "Positive"), 4)),
  Mean = c(1359.033, 1368.900, 1425.369, 1391.637, 1426.055, 1345.964, 1387.767, 1471.741),
  SD = c(275.526, 282.975, 305.962, 374.682, 300.152, 288.812, 287.703, 331.938)
) %>%
  mutate(
    SE = SD / sqrt(35),
    Upper = Mean + SE,
    Lower = Mean - SE
  )

ggplot(rm.exn.wt.ps.val,
       aes(x = Word.Type, y = Mean,
           group = Primed.Satiated, color = Primed.Satiated)) +
  geom_point(size = 3, shape = 20, position = position_dodge(.2)) +
  geom_errorbar(aes(ymin = Lower, ymax = Upper),
                size = .5, width = .13, position = position_dodge(.2)) +
  geom_line(position = position_dodge(.2)) +
  facet_grid(cols = vars(Participant.Group)) +
  ylab("Mean Reaction Time (ms)") +
  expand_limits(y = 0:2000) +
  scale_y_continuous(expand = c(0, 0)) +
  ggtitle("Mean Reaction Time by Word Type & Primed/Satiated") +
  theme_grey() +
  theme(legend.title = element_blank()) +
  theme(rect = element_rect(fill = "transparent")) +
  theme(
    text = element_text(size = 20),
    axis.text.x = element_text(angle = 20, hjust = 1)
  )

#----
#  Primed/Satiated marginal means (BDI-grouped analysis) with CI bounds.
rm.exn.bdi.ps <- data.frame(
  Primed.Satiated = c("Primed", "Satiated"),
  Mean = c(1373.709, 1420.407),
  SE = c(35.029, 35.029),
  Lower = c(1303.953, 1350.650),
  Upper = c(1443.466, 1490.164)
)

ggplot(rm.exn.bdi.ps, aes(x = Primed.Satiated, y = Mean)) +
  geom_point(shape = 20, size = 3) +
  geom_errorbar(aes(ymin = Lower, ymax = Upper), width = .13, size = .5) +
  ylab("Mean Reaction Time (ms)") +
  expand_limits(y = 0:2000) +
  scale_y_continuous(expand = c(0, 0)) +
  ggtitle("Mean Reaction Time by Primed/Satiated") +
  theme_grey() +
  theme(rect = element_rect(fill = "transparent")) +
  theme(
    text = element_text(size = 20),
    axis.text.x = element_text(angle = 20, hjust = 1)
  )

#----

#  Cell means/SDs for Word Type x Primed/Satiated x Participant Group
#  (BDI-grouped analysis).  NOTE(review): values are identical to
#  rm.exn.wt.ps.val above -- confirm this duplication is intentional.
#  SE assumes n = 35 per cell -- TODO confirm.
rm.exn.bdi.wt.ps.val <- data.frame(
  Word.Type = rep(c("Emotion-label", "Emotion-laden"), each = 4),
  Primed.Satiated = rep(c("Primed", "Satiated"), each = 2, times = 2),
  Participant.Group = rep(c("Negative", "Positive"), times = 4),
  Mean = c(1359.033, 1368.900, 1425.369, 1391.637,
           1426.055, 1345.964, 1387.767, 1471.741),
  SD = c(275.526, 282.975, 305.962, 374.682,
         300.152, 288.812, 287.703, 331.938)
) %>%
  mutate(
    SE = SD / sqrt(35),
    Upper = Mean + SE,
    Lower = Mean - SE
  )

ggplot(rm.exn.bdi.wt.ps.val,
       aes(x = Word.Type, y = Mean,
           group = Primed.Satiated, color = Primed.Satiated)) +
  geom_point(shape = 20, size = 3, position = position_dodge(.2)) +
  geom_errorbar(aes(ymin = Lower, ymax = Upper),
                width = .13, size = .5, position = position_dodge(.2)) +
  geom_line(position = position_dodge(.2)) +
  facet_grid(cols = vars(Participant.Group)) +
  ylab("Mean Reaction Time (ms)") +
  expand_limits(y = 0:2000) +
  scale_y_continuous(expand = c(0, 0)) +
  ggtitle("Mean Reaction Time by Word Type & Primed/Satiated") +
  theme_grey() +
  theme(legend.title = element_blank()) +
  theme(rect = element_rect(fill = "transparent")) +
  theme(
    text = element_text(size = 20),
    axis.text.x = element_text(angle = 20, hjust = 1)
  )

#----
#  Word Type marginal means (neutral targets included) with CI bounds.
rm.incn.wt <- data.frame(
  Word.Type = c("Emotion-label", "Emotion-laden", "Neutral"),
  Mean = c(1386.235, 1407.882, 1585.602),
  SE = c(34.363, 34.363, 34.363),
  Lower = c(1318.069, 1339.716, 1517.436),
  Upper = c(1454.401, 1476.048, 1653.768)
)

ggplot(rm.incn.wt, aes(x = Word.Type, y = Mean)) +
  geom_point(shape = 20, size = 3) +
  geom_errorbar(aes(ymin = Lower, ymax = Upper), width = .13, size = .5) +
  ylab("Mean Reaction Time (ms)") +
  expand_limits(y = 0:2000) +
  scale_y_continuous(expand = c(0, 0)) +
  ggtitle("Mean Reaction Time by Word Type") +
  theme_grey() +
  theme(rect = element_rect(fill = "transparent")) +
  theme(
    text = element_text(size = 20),
    axis.text.x = element_text(angle = 20, hjust = 1)
  )

#----
#  Word Type x Primed/Satiated marginal means (neutral included).
rm.incn.wt.ps <- data.frame(
  Word.Type = rep(c("Emotion-label", "Emotion-laden", "Neutral"), each = 2),
  Primed.Satiated = rep(c("Primed", "Satiated"), times = 3),
  Mean = c(1363.966, 1408.503, 1386.010, 1429.754, 1607.085, 1564.119),
  SE = rep(36.394, 6),
  Lower = c(1291.946, 1336.482, 1313.989, 1357.733, 1535.064, 1492.098),
  Upper = c(1435.987, 1480.524, 1458.030, 1501.775, 1679.106, 1636.140)
)

ggplot(rm.incn.wt.ps,
       aes(x = Word.Type, y = Mean,
           group = Primed.Satiated, color = Primed.Satiated)) +
  geom_point(shape = 20, size = 3, position = position_dodge(.2)) +
  geom_errorbar(aes(ymin = Lower, ymax = Upper),
                width = .13, size = .5, position = position_dodge(.2)) +
  geom_line(position = position_dodge(.2)) +
  ylab("Mean Reaction Time (ms)") +
  expand_limits(y = 0:2000) +
  scale_y_continuous(expand = c(0, 0)) +
  ggtitle("Mean Reaction Time by Word Type & Primed/Satiated") +
  theme_grey() +
  theme(legend.title = element_blank()) +
  theme(rect = element_rect(fill = "transparent")) +
  theme(
    text = element_text(size = 20),
    axis.text.x = element_text(angle = 20, hjust = 1)
  )

#----
#  Cell means/SDs for Word Type x Primed/Satiated x Participant Group
#  with neutral targets included.  SE assumes n = 35 per cell -- TODO confirm.
rm.incn.wt.ps.val <- data.frame(
  Word.Type = rep(c("Emotion-label", "Emotion-laden", "Neutral"), each = 4),
  Primed.Satiated = rep(c("Primed", "Satiated"), each = 2, times = 3),
  Participant.Group = rep(c("Negative", "Positive"), times = 6),
  Mean = c(1359.033, 1368.900, 1425.369, 1391.637,
           1426.055, 1345.964, 1387.767, 1471.741,
           1635.989, 1578.180, 1595.875, 1532.362),
  SD = c(275.526, 282.975, 305.962, 374.682,
         300.152, 288.812, 287.703, 331.938,
         266.254, 332.288, 244.566, 339.172)
) %>%
  mutate(
    SE = SD / sqrt(35),
    Upper = Mean + SE,
    Lower = Mean - SE
  )

ggplot(rm.incn.wt.ps.val,
       aes(x = Word.Type, y = Mean,
           group = Primed.Satiated, color = Primed.Satiated)) +
  geom_point(shape = 20, size = 3, position = position_dodge(.2)) +
  geom_errorbar(aes(ymin = Lower, ymax = Upper),
                width = .13, size = .5, position = position_dodge(.2)) +
  geom_line(position = position_dodge(.2)) +
  facet_grid(cols = vars(Participant.Group)) +
  ylab("Mean Reaction Time (ms)") +
  expand_limits(y = 0:2000) +
  scale_y_continuous(expand = c(0, 0)) +
  ggtitle("Mean Reaction Time by Word Type & Primed/Satiated") +
  theme_grey() +
  theme(legend.title = element_blank()) +
  theme(rect = element_rect(fill = "transparent")) +
  theme(
    text = element_text(size = 20),
    axis.text.x = element_text(angle = 20, hjust = 1)
  )

#----
#  Word Type marginal means (BDI-grouped analysis, neutral included).
rm.incn.bdi.wt <- data.frame(
  Word.Type = c("Emotion-label", "Emotion-laden", "Neutral"),
  Mean = c(1381.240, 1406.022, 1592.456),
  SE = rep(34.926, 3),
  Lower = c(1312.003, 1336.785, 1523.219),
  Upper = c(1450.477, 1475.260, 1661.693)
)

ggplot(rm.incn.bdi.wt, aes(x = Word.Type, y = Mean)) +
  geom_point(shape = 20, size = 3) +
  geom_errorbar(aes(ymin = Lower, ymax = Upper), width = .13, size = .5) +
  ylab("Mean Reaction Time (ms)") +
  expand_limits(y = 0:2000) +
  scale_y_continuous(expand = c(0, 0)) +
  ggtitle("Mean Reaction Time by Word Type") +
  theme_grey() +
  theme(rect = element_rect(fill = "transparent")) +
  theme(
    text = element_text(size = 20),
    axis.text.x = element_text(angle = 20, hjust = 1)
  )

#----
#  Word Type x Primed/Satiated marginal means (BDI-grouped, neutral included).
rm.incn.bdi.wt.ps <- data.frame(
  Word.Type = rep(c("Emotion-label", "Emotion-laden", "Neutral"), each = 2),
  Primed.Satiated = rep(c("Primed", "Satiated"), times = 3),
  Mean = c(1356.469, 1406.011, 1384.096, 1427.949, 1616.490, 1568.423),
  SE = rep(37.409, 6),
  Lower = c(1282.504, 1332.046, 1310.131, 1353.984, 1542.525, 1494.458),
  Upper = c(1430.433, 1479.976, 1458.060, 1501.913, 1690.454, 1642.387)
)

ggplot(rm.incn.bdi.wt.ps,
       aes(x = Word.Type, y = Mean,
           group = Primed.Satiated, color = Primed.Satiated)) +
  geom_point(shape = 20, size = 3, position = position_dodge(.2)) +
  geom_errorbar(aes(ymin = Lower, ymax = Upper),
                width = .13, size = .5, position = position_dodge(.2)) +
  geom_line(position = position_dodge(.2)) +
  ylab("Mean Reaction Time (ms)") +
  expand_limits(y = 0:2000) +
  scale_y_continuous(expand = c(0, 0)) +
  ggtitle("Mean Reaction Time by Word Type & Primed/Satiated") +
  theme_grey() +
  theme(legend.title = element_blank()) +
  theme(rect = element_rect(fill = "transparent")) +
  theme(
    text = element_text(size = 20),
    axis.text.x = element_text(angle = 20, hjust = 1)
  )

#----
#  Cell means/SDs for Word Type x Primed/Satiated x Participant Group
#  (BDI-grouped, neutral included).  NOTE(review): values are identical
#  to rm.incn.wt.ps.val above -- confirm the duplication is intentional.
#  SE assumes n = 35 per cell -- TODO confirm.
rm.incn.bdi.wt.ps.val <- data.frame(
  Word.Type = rep(c("Emotion-label", "Emotion-laden", "Neutral"), each = 4),
  Primed.Satiated = rep(c("Primed", "Satiated"), each = 2, times = 3),
  Participant.Group = rep(c("Negative", "Positive"), times = 6),
  Mean = c(1359.033, 1368.900, 1425.369, 1391.637,
           1426.055, 1345.964, 1387.767, 1471.741,
           1635.989, 1578.180, 1595.875, 1532.362),
  SD = c(275.526, 282.975, 305.962, 374.682,
         300.152, 288.812, 287.703, 331.938,
         266.254, 332.288, 244.566, 339.172)
) %>%
  mutate(
    SE = SD / sqrt(35),
    Upper = Mean + SE,
    Lower = Mean - SE
  )

ggplot(rm.incn.bdi.wt.ps.val,
       aes(x = Word.Type, y = Mean,
           group = Primed.Satiated, color = Primed.Satiated)) +
  geom_point(shape = 20, size = 3, position = position_dodge(.2)) +
  geom_errorbar(aes(ymin = Lower, ymax = Upper),
                width = .13, size = .5, position = position_dodge(.2)) +
  geom_line(position = position_dodge(.2)) +
  facet_grid(cols = vars(Participant.Group)) +
  ylab("Mean Reaction Time (ms)") +
  expand_limits(y = 0:2000) +
  scale_y_continuous(expand = c(0, 0)) +
  ggtitle("Mean Reaction Time by Word Type & Primed/Satiated") +
  theme_grey() +
  theme(legend.title = element_blank()) +
  theme(rect = element_rect(fill = "transparent")) +
  theme(
    text = element_text(size = 20),
    axis.text.x = element_text(angle = 20, hjust = 1)
  )

#scratch

#sl.factorized <- sl.trimmed %>%
#    mutate_if(sapply(., is.character), as.factor) %>%
#    mutate_if(sapply(., is.factor), fac.to.num)
#sl.agg.desc <- sl.factorized %>%
#    dplyr::select(., Session.ID, Participant.Group, X.Primed.Satiated, Target.V, Target.WT, TPRValence, bdi, Error.Code, Reaction.Time) %>%
#    group_by(., Session.ID, Participant.Group, X.Primed.Satiated, Target.V, TPRValence, Target.WT, bdi, Error.Code) %>%
#    summarise_all(list(length = length, mean = mean, median = median, sd = sd, min = min, max = max)) %>%
#    rename(RTCMean = "mean", n = "length", Congruency = TPRValence)
#  sl.agg <- sl.factorized %>%
#    dplyr::select(., Session.ID, Participant.Group, X.Primed.Satiated, Target.V, Target.WT, TPRValence, bdi, Error.Code, Reaction.Time) %>%
#   group_by(., Session.ID, Participant.Group, X.Primed.Satiated, Target.V, TPRValence, Target.WT, bdi, Error.Code) %>%
#    summarise_all(list(mean = mean)) %>%
#    rename(RTCMean = mean, Congruency = TPRValence)
#    
#sl.agg.rm <- sl.factorized %>%
#    dplyr::select(Session.ID, Participant.Group, X.Primed.Satiated, Target.V, Target.WT, TPRValence, bdi, Error.Code, Reaction.Time) %>%
#    group_by(Session.ID, Participant.Group, X.Primed.Satiated, Target.V, TPRValence, Target.WT, bdi, Error.Code) %>%
#    summarise_all(list(mean = mean)) %>%
#    filter(Error.Code == 1) %>%
#    merge(., c(distinct(sl.agg[, 1:2])), by = "Session.ID") %>%
#    dplyr::select(., -Participant.Group.y) %>%
#    rename(RTCMean = mean, Congruency = TPRValence, Participant.Group = Participant.Group.x) %>%
#    mutate(Participant.Group = ifelse(.$Participant.Group %in% c(1, 2), 1, 2))
#
#  sl.agg.rm.neutral.spss <- sl.agg.rm %>%
#    recast(.,
#      formula = Session.ID ~ Target.WT + X.Primed.Satiated,
#      id.var = c("Session.ID", "Target.WT", "X.Primed.Satiated"),
#      measure.var = "RTCMean",
#      fun = mean
#    ) %>%
#    merge(., c(distinct(sl.agg[, c(1:2, 7)])), by = "Session.ID") %>%
#    mutate(Participant.Group = ifelse(.$Participant.Group %in% c(1, 2), 1, 2))
#  write.csv(sl.agg.rm.neutral.spss, "sl.agg.rm.spss.neutral.csv")